Mon, 18 Mar 2013 11:05:27 -0700
8009536: G1: Apache Lucene hang during reference processing
Summary: In CMTask::do_marking_step(), skip offering termination and entering the first and second synchronization barriers if called from a serial context, i.e., the VM thread.
Reviewed-by: brutisso, tschatzl
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 #include "gc_implementation/g1/concurrentMark.hpp"
28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
32 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
33 #include "gc_implementation/g1/g1Log.hpp"
34 #include "gc_implementation/g1/heapRegionRemSet.hpp"
35 #include "gc_implementation/shared/gcPolicyCounters.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/java.hpp"
38 #include "runtime/mutexLocker.hpp"
39 #include "utilities/debug.hpp"
41 // Different defaults for different number of GC threads
42 // They were chosen by running GCOld and SPECjbb on debris with different
43 // numbers of GC threads and choosing them based on the results
45 // all the same
46 static double rs_length_diff_defaults[] = {
47 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
48 };
50 static double cost_per_card_ms_defaults[] = {
51 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
52 };
54 // all the same
55 static double young_cards_per_entry_ratio_defaults[] = {
56 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
57 };
59 static double cost_per_entry_ms_defaults[] = {
60 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
61 };
63 static double cost_per_byte_ms_defaults[] = {
64 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
65 };
67 // these should be pretty consistent
68 static double constant_other_time_ms_defaults[] = {
69 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
70 };
73 static double young_other_cost_per_region_ms_defaults[] = {
74 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
75 };
77 static double non_young_other_cost_per_region_ms_defaults[] = {
78 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
79 };
81 G1CollectorPolicy::G1CollectorPolicy() :
82 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
83 ? ParallelGCThreads : 1),
85 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
86 _stop_world_start(0.0),
88 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
89 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
91 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
92 _prev_collection_pause_end_ms(0.0),
93 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
94 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
95 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
97 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _non_young_other_cost_per_region_ms_seq(
104 new TruncatedSeq(TruncatedSeqLength)),
106 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
107 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
109 _pause_time_target_ms((double) MaxGCPauseMillis),
111 _gcs_are_young(true),
113 _during_marking(false),
114 _in_marking_window(false),
115 _in_marking_window_im(false),
117 _recent_prev_end_times_for_all_gcs_sec(
118 new TruncatedSeq(NumPrevPausesForHeuristics)),
120 _recent_avg_pause_time_ratio(0.0),
122 _initiate_conc_mark_if_possible(false),
123 _during_initial_mark_pause(false),
124 _last_young_gc(false),
125 _last_gc_was_young(false),
127 _eden_bytes_before_gc(0),
128 _survivor_bytes_before_gc(0),
129 _capacity_before_gc(0),
131 _eden_cset_region_length(0),
132 _survivor_cset_region_length(0),
133 _old_cset_region_length(0),
135 _collection_set(NULL),
136 _collection_set_bytes_used_before(0),
138 // Incremental CSet attributes
139 _inc_cset_build_state(Inactive),
140 _inc_cset_head(NULL),
141 _inc_cset_tail(NULL),
142 _inc_cset_bytes_used_before(0),
143 _inc_cset_max_finger(NULL),
144 _inc_cset_recorded_rs_lengths(0),
145 _inc_cset_recorded_rs_lengths_diffs(0),
146 _inc_cset_predicted_elapsed_time_ms(0.0),
147 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
149 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
150 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
151 #endif // _MSC_VER
153 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
154 G1YoungSurvRateNumRegionsSummary)),
155 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
156 G1YoungSurvRateNumRegionsSummary)),
157 // add here any more surv rate groups
158 _recorded_survivor_regions(0),
159 _recorded_survivor_head(NULL),
160 _recorded_survivor_tail(NULL),
161 _survivors_age_table(true),
163 _gc_overhead_perc(0.0) {
165 // Set up the region size and associated fields. Given that the
166 // policy is created before the heap, we have to set this up here,
167 // so it's done as soon as possible.
168 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
169 HeapRegionRemSet::setup_remset_size();
171 G1ErgoVerbose::initialize();
172 if (PrintAdaptiveSizePolicy) {
173 // Currently, we only use a single switch for all the heuristics.
174 G1ErgoVerbose::set_enabled(true);
175 // Given that we don't currently have a verboseness level
176 // parameter, we'll hardcode this to high. This can be easily
177 // changed in the future.
178 G1ErgoVerbose::set_level(ErgoHigh);
179 } else {
180 G1ErgoVerbose::set_enabled(false);
181 }
183 // Verify PLAB sizes
184 const size_t region_size = HeapRegion::GrainWords;
185 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
186 char buffer[128];
187 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
188 OldPLABSize > region_size ? "Old" : "Young", region_size);
189 vm_exit_during_initialization(buffer);
190 }
192 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
193 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
195 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
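// Seed the prediction sequences from the per-thread-count default arrays
// defined above; each array has eight entries, so the index is clamped to 0..7.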
197 int index = MIN2(_parallel_gc_threads - 1, 7);
199 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
200 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
201 _young_cards_per_entry_ratio_seq->add(
202 young_cards_per_entry_ratio_defaults[index]);
203 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
204 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
205 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
206 _young_other_cost_per_region_ms_seq->add(
207 young_other_cost_per_region_ms_defaults[index]);
208 _non_young_other_cost_per_region_ms_seq->add(
209 non_young_other_cost_per_region_ms_defaults[index]);
211 // Below, we might need to calculate the pause time target based on
212 // the pause interval. When we do so we are going to give G1 maximum
213 // flexibility and allow it to do pauses when it needs to. So, we'll
214 // arrange for the pause interval to be the pause time target + 1 to
215 // ensure that a) the pause time target is maximized with respect to
216 // the pause interval and b) we maintain the invariant that pause
217 // time target < pause interval. If the user does not want this
218 // maximum flexibility, they will have to set the pause interval
219 // explicitly.
221 // First make sure that, if either parameter is set, its value is
222 // reasonable.
223 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
224 if (MaxGCPauseMillis < 1) {
225 vm_exit_during_initialization("MaxGCPauseMillis should be "
226 "greater than 0");
227 }
228 }
229 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
230 if (GCPauseIntervalMillis < 1) {
231 vm_exit_during_initialization("GCPauseIntervalMillis should be "
232 "greater than 0");
233 }
234 }
236 // Then, if the pause time target parameter was not set, set it to
237 // the default value.
238 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
239 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
240 // The default pause time target in G1 is 200ms
241 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
242 } else {
243 // We do not allow the pause interval to be set without the
244 // pause time target
245 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
246 "without setting MaxGCPauseMillis");
247 }
248 }
250 // Then, if the interval parameter was not set, set it according to
251 // the pause time target (this will also deal with the case when the
252 // pause time target is the default value).
253 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
254 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
255 }
257 // Finally, make sure that the two parameters are consistent.
258 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
259 char buffer[256];
260 jio_snprintf(buffer, 256,
261 "MaxGCPauseMillis (%u) should be less than "
262 "GCPauseIntervalMillis (%u)",
263 MaxGCPauseMillis, GCPauseIntervalMillis);
264 vm_exit_during_initialization(buffer);
265 }
267 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
268 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
269 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
271 uintx confidence_perc = G1ConfidencePercent;
272 // Put an artificial ceiling on this so that it's not set to a silly value.
273 if (confidence_perc > 100) {
274 confidence_perc = 100;
275 warning("G1ConfidencePercent is set to a value that is too large, "
276 "it's been updated to %u", confidence_perc);
277 }
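// sigma() is used to pad predictions (see, e.g., predict_will_fit() below);
// a higher G1ConfidencePercent therefore makes the policy more conservative.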
278 _sigma = (double) confidence_perc / 100.0;
280 // start conservatively (around 50ms is about right)
281 _concurrent_mark_remark_times_ms->add(0.05);
282 _concurrent_mark_cleanup_times_ms->add(0.20);
283 _tenuring_threshold = MaxTenuringThreshold;
284 // _max_survivor_regions will be calculated by
285 // update_young_list_target_length() during initialization.
286 _max_survivor_regions = 0;
288 assert(GCTimeRatio > 0,
289 "we should have set it to a default value set_g1_gc_flags() "
290 "if a user set it to 0");
291 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
293 uintx reserve_perc = G1ReservePercent;
294 // Put an artificial ceiling on this so that it's not set to a silly value.
295 if (reserve_perc > 50) {
296 reserve_perc = 50;
297 warning("G1ReservePercent is set to a value that is too large, "
298 "it's been updated to %u", reserve_perc);
299 }
300 _reserve_factor = (double) reserve_perc / 100.0;
301 // This will be set when the heap is expanded
302 // for the first time during initialization.
303 _reserve_regions = 0;
305 initialize_all();
306 _collectionSetChooser = new CollectionSetChooser();
307 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
308 }
310 void G1CollectorPolicy::initialize_flags() {
311 set_min_alignment(HeapRegion::GrainBytes);
312 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
313 if (SurvivorRatio < 1) {
314 vm_exit_during_initialization("Invalid survivor ratio specified");
315 }
316 CollectorPolicy::initialize_flags();
317 }
319 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
320 assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
321 assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
322 assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");
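// Explicitly set NewSize / MaxNewSize take precedence over NewRatio. If only
// NewRatio is given, the young gen is fixed to a constant fraction of the
// heap and adaptive young-gen sizing is disabled.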
324 if (FLAG_IS_CMDLINE(NewRatio)) {
325 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
326 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
327 } else {
328 _sizer_kind = SizerNewRatio;
329 _adaptive_size = false;
330 return;
331 }
332 }
334 if (FLAG_IS_CMDLINE(NewSize)) {
335 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
336 1U);
337 if (FLAG_IS_CMDLINE(MaxNewSize)) {
338 _max_desired_young_length =
339 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
340 1U);
341 _sizer_kind = SizerMaxAndNewSize;
342 _adaptive_size = _min_desired_young_length == _max_desired_young_length;
343 } else {
344 _sizer_kind = SizerNewSizeOnly;
345 }
346 } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
347 _max_desired_young_length =
348 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
349 1U);
350 _sizer_kind = SizerMaxNewSizeOnly;
351 }
352 }
354 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
355 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
356 return MAX2(1U, default_value);
357 }
359 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
360 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
361 return MAX2(1U, default_value);
362 }
364 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
365 assert(new_number_of_heap_regions > 0, "Heap must be initialized");
367 switch (_sizer_kind) {
368 case SizerDefaults:
369 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
370 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
371 break;
372 case SizerNewSizeOnly:
373 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
374 _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
375 break;
376 case SizerMaxNewSizeOnly:
377 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
378 _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
379 break;
380 case SizerMaxAndNewSize:
381 // Do nothing. The values were set on the command line; don't update them at runtime.
382 break;
383 case SizerNewRatio:
384 _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
385 _max_desired_young_length = _min_desired_young_length;
386 break;
387 default:
388 ShouldNotReachHere();
389 }
391 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
392 }
394 void G1CollectorPolicy::init() {
395 // Set aside an initial future to_space.
396 _g1 = G1CollectedHeap::heap();
398 assert(Heap_lock->owned_by_self(), "Locking discipline.");
400 initialize_gc_policy_counters();
402 if (adaptive_young_list_length()) {
403 _young_list_fixed_length = 0;
404 } else {
405 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
406 }
407 _free_regions_at_end_of_collection = _g1->free_regions();
408 update_young_list_target_length();
409 _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
411 // We may immediately start allocating regions and placing them on the
412 // collection set list. Initialize the per-collection set info
413 start_incremental_cset_building();
414 }
416 // Create the jstat counters for the policy.
417 void G1CollectorPolicy::initialize_gc_policy_counters() {
418 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
419 }
421 bool G1CollectorPolicy::predict_will_fit(uint young_length,
422 double base_time_ms,
423 uint base_free_regions,
424 double target_pause_time_ms) {
425 if (young_length >= base_free_regions) {
426 // end condition 1: not enough space for the young regions
427 return false;
428 }
430 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
431 size_t bytes_to_copy =
432 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
433 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
434 double young_other_time_ms = predict_young_other_time_ms(young_length);
435 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
436 if (pause_time_ms > target_pause_time_ms) {
437 // end condition 2: prediction is over the target pause time
438 return false;
439 }
441 size_t free_bytes =
442 (base_free_regions - young_length) * HeapRegion::GrainBytes;
443 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
444 // end condition 3: out-of-space (conservatively!)
445 return false;
446 }
448 // success!
449 return true;
450 }
452 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
453 // re-calculate the necessary reserve
454 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
455 // We use ceiling so that if reserve_regions_d is > 0.0 (but
456 // smaller than 1.0) we'll get 1.
457 _reserve_regions = (uint) ceil(reserve_regions_d);
459 _young_gen_sizer->heap_size_changed(new_number_of_regions);
460 }
462 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
463 uint base_min_length) {
464 uint desired_min_length = 0;
465 if (adaptive_young_list_length()) {
466 if (_alloc_rate_ms_seq->num() > 3) {
467 double now_sec = os::elapsedTime();
468 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
469 double alloc_rate_ms = predict_alloc_rate_ms();
470 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
471 } else {
472 // otherwise we don't have enough info to make the prediction
473 }
474 }
475 desired_min_length += base_min_length;
476 // make sure we don't go below any user-defined minimum bound
477 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
478 }
480 uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
481 // Here, we might want to also take into account any additional
482 // constraints (i.e., user-defined minimum bound). Currently, we
483 // effectively don't set this bound.
484 return _young_gen_sizer->max_desired_young_length();
485 }
487 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
488 if (rs_lengths == (size_t) -1) {
489 // if it's set to the default value (-1), we should predict it;
490 // otherwise, use the given value.
491 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
492 }
494 // Calculate the absolute and desired min bounds.
496 // This is how many young regions we already have (currently: the survivors).
497 uint base_min_length = recorded_survivor_regions();
498 // This is the absolute minimum young length, which ensures that we
499 // can allocate one eden region in the worst-case.
500 uint absolute_min_length = base_min_length + 1;
501 uint desired_min_length =
502 calculate_young_list_desired_min_length(base_min_length);
503 if (desired_min_length < absolute_min_length) {
504 desired_min_length = absolute_min_length;
505 }
507 // Calculate the absolute and desired max bounds.
509 // We will try our best not to "eat" into the reserve.
510 uint absolute_max_length = 0;
511 if (_free_regions_at_end_of_collection > _reserve_regions) {
512 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
513 }
514 uint desired_max_length = calculate_young_list_desired_max_length();
515 if (desired_max_length > absolute_max_length) {
516 desired_max_length = absolute_max_length;
517 }
519 uint young_list_target_length = 0;
520 if (adaptive_young_list_length()) {
521 if (gcs_are_young()) {
522 young_list_target_length =
523 calculate_young_list_target_length(rs_lengths,
524 base_min_length,
525 desired_min_length,
526 desired_max_length);
527 _rs_lengths_prediction = rs_lengths;
528 } else {
529 // Don't calculate anything and let the code below bound it to
530 // the desired_min_length, i.e., do the next GC as soon as
531 // possible to maximize how many old regions we can add to it.
532 }
533 } else {
534 // The user asked for a fixed young gen so we'll fix the young gen
535 // whether the next GC is young or mixed.
536 young_list_target_length = _young_list_fixed_length;
537 }
539 // Make sure we don't go over the desired max length, nor under the
540 // desired min length. In case they clash, desired_min_length wins
541 // which is why that test is second.
542 if (young_list_target_length > desired_max_length) {
543 young_list_target_length = desired_max_length;
544 }
545 if (young_list_target_length < desired_min_length) {
546 young_list_target_length = desired_min_length;
547 }
549 assert(young_list_target_length > recorded_survivor_regions(),
550 "we should be able to allocate at least one eden region");
551 assert(young_list_target_length >= absolute_min_length, "post-condition");
552 _young_list_target_length = young_list_target_length;
554 update_max_gc_locker_expansion();
555 }
557 uint
558 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
559 uint base_min_length,
560 uint desired_min_length,
561 uint desired_max_length) {
562 assert(adaptive_young_list_length(), "pre-condition");
563 assert(gcs_are_young(), "only call this for young GCs");
565 // In case some edge-condition makes the desired max length too small...
566 if (desired_max_length <= desired_min_length) {
567 return desired_min_length;
568 }
570 // We'll adjust min_young_length and max_young_length not to include
571 // the already allocated young regions (i.e., so they reflect the
572 // min and max eden regions we'll allocate). The base_min_length
573 // will be reflected in the predictions by the
574 // survivor_regions_evac_time prediction.
575 assert(desired_min_length > base_min_length, "invariant");
576 uint min_young_length = desired_min_length - base_min_length;
577 assert(desired_max_length > base_min_length, "invariant");
578 uint max_young_length = desired_max_length - base_min_length;
580 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
581 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
582 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
583 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
584 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
585 double base_time_ms =
586 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
587 survivor_regions_evac_time;
588 uint available_free_regions = _free_regions_at_end_of_collection;
589 uint base_free_regions = 0;
590 if (available_free_regions > _reserve_regions) {
591 base_free_regions = available_free_regions - _reserve_regions;
592 }
594 // Here, we will make sure that the shortest young length that
595 // makes sense fits within the target pause time.
597 if (predict_will_fit(min_young_length, base_time_ms,
598 base_free_regions, target_pause_time_ms)) {
599 // The shortest young length will fit into the target pause time;
600 // we'll now check whether the absolute maximum number of young
601 // regions will fit in the target pause time. If not, we'll do
602 // a binary search between min_young_length and max_young_length.
603 if (predict_will_fit(max_young_length, base_time_ms,
604 base_free_regions, target_pause_time_ms)) {
605 // The maximum young length will fit into the target pause time.
606 // We are done so set min young length to the maximum length (as
607 // the result is assumed to be returned in min_young_length).
608 min_young_length = max_young_length;
609 } else {
610 // The maximum possible number of young regions will not fit within
611 // the target pause time so we'll search for the optimal
612 // length. The loop invariants are:
613 //
614 // min_young_length < max_young_length
615 // min_young_length is known to fit into the target pause time
616 // max_young_length is known not to fit into the target pause time
617 //
618 // Going into the loop we know the above hold as we've just
619 // checked them. Every time around the loop we check whether
620 // the middle value between min_young_length and
621 // max_young_length fits into the target pause time. If it
622 // does, it becomes the new min. If it doesn't, it becomes
623 // the new max. This way we maintain the loop invariants.
625 assert(min_young_length < max_young_length, "invariant");
626 uint diff = (max_young_length - min_young_length) / 2;
627 while (diff > 0) {
628 uint young_length = min_young_length + diff;
629 if (predict_will_fit(young_length, base_time_ms,
630 base_free_regions, target_pause_time_ms)) {
631 min_young_length = young_length;
632 } else {
633 max_young_length = young_length;
634 }
635 assert(min_young_length < max_young_length, "invariant");
636 diff = (max_young_length - min_young_length) / 2;
637 }
638 // The result is min_young_length which, according to the
639 // loop invariants, should fit within the target pause time.
641 // These are the post-conditions of the binary search above:
642 assert(min_young_length < max_young_length,
643 "otherwise we should have discovered that max_young_length "
644 "fits into the pause target and not done the binary search");
645 assert(predict_will_fit(min_young_length, base_time_ms,
646 base_free_regions, target_pause_time_ms),
647 "min_young_length, the result of the binary search, should "
648 "fit into the pause target");
649 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
650 base_free_regions, target_pause_time_ms),
651 "min_young_length, the result of the binary search, should be "
652 "optimal, so no larger length should fit into the pause target");
653 }
654 } else {
655 // Even the minimum length doesn't fit into the pause time
656 // target, return it as the result nevertheless.
657 }
658 return base_min_length + min_young_length;
659 }
661 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
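// Sum the predicted evacuation time of every recorded survivor region; this
// becomes part of the base pause time when sizing the young list.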
662 double survivor_regions_evac_time = 0.0;
663 for (HeapRegion * r = _recorded_survivor_head;
664 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
665 r = r->get_next_young_region()) {
666 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
667 }
668 return survivor_regions_evac_time;
669 }
671 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
672 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
674 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
675 if (rs_lengths > _rs_lengths_prediction) {
676 // add 10% to avoid having to recalculate often
677 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
678 update_young_list_target_length(rs_lengths_prediction);
679 }
680 }
684 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
685 bool is_tlab,
686 bool* gc_overhead_limit_was_exceeded) {
687 guarantee(false, "Not using this policy feature yet.");
688 return NULL;
689 }
691 // This method controls how a collector handles one or more
692 // of its generations being fully allocated.
693 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
694 bool is_tlab) {
695 guarantee(false, "Not using this policy feature yet.");
696 return NULL;
697 }
700 #ifndef PRODUCT
701 bool G1CollectorPolicy::verify_young_ages() {
702 HeapRegion* head = _g1->young_list()->first_region();
703 return
704 verify_young_ages(head, _short_lived_surv_rate_group);
705 // also call verify_young_ages on any additional surv rate groups
706 }
708 bool
709 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
710 SurvRateGroup *surv_rate_group) {
711 guarantee( surv_rate_group != NULL, "pre-condition" );
713 const char* name = surv_rate_group->name();
714 bool ret = true;
715 int prev_age = -1;
717 for (HeapRegion* curr = head;
718 curr != NULL;
719 curr = curr->get_next_young_region()) {
720 SurvRateGroup* group = curr->surv_rate_group();
721 if (group == NULL && !curr->is_survivor()) {
722 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
723 ret = false;
724 }
726 if (surv_rate_group == group) {
727 int age = curr->age_in_surv_rate_group();
729 if (age < 0) {
730 gclog_or_tty->print_cr("## %s: encountered negative age", name);
731 ret = false;
732 }
734 if (age <= prev_age) {
735 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
736 "(%d, %d)", name, age, prev_age);
737 ret = false;
738 }
739 prev_age = age;
740 }
741 }
743 return ret;
744 }
745 #endif // PRODUCT
747 void G1CollectorPolicy::record_full_collection_start() {
748 _full_collection_start_sec = os::elapsedTime();
749 // Release the future to-space so that it is available for compaction into.
750 _g1->set_full_collection();
751 }
753 void G1CollectorPolicy::record_full_collection_end() {
754 // Consider this like a collection pause for the purposes of allocation
755 // since last pause.
756 double end_sec = os::elapsedTime();
757 double full_gc_time_sec = end_sec - _full_collection_start_sec;
758 double full_gc_time_ms = full_gc_time_sec * 1000.0;
760 _trace_gen1_time_data.record_full_collection(full_gc_time_ms);
762 update_recent_gc_times(end_sec, full_gc_time_ms);
764 _g1->clear_full_collection();
766 // "Nuke" the heuristics that control the young/mixed GC
767 // transitions and make sure we start with young GCs after the Full GC.
768 set_gcs_are_young(true);
769 _last_young_gc = false;
770 clear_initiate_conc_mark_if_possible();
771 clear_during_initial_mark_pause();
772 _in_marking_window = false;
773 _in_marking_window_im = false;
775 _short_lived_surv_rate_group->start_adding_regions();
776 // also call this on any additional surv rate groups
778 record_survivor_regions(0, NULL, NULL);
780 _free_regions_at_end_of_collection = _g1->free_regions();
781 // Reset survivors SurvRateGroup.
782 _survivor_surv_rate_group->reset();
783 update_young_list_target_length();
784 _collectionSetChooser->clear();
785 }
787 void G1CollectorPolicy::record_stop_world_start() {
788 _stop_world_start = os::elapsedTime();
789 }
791 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
792 size_t start_used) {
793 // We only need to do this here as the policy will only be applied
794 // to the GC we're about to start. So, there is no point in calculating this
795 // every time we calculate / recalculate the target young length.
796 update_survivors_policy();
798 assert(_g1->used() == _g1->recalculate_used(),
799 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
800 _g1->used(), _g1->recalculate_used()));
802 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
803 _trace_gen0_time_data.record_start_collection(s_w_t_ms);
804 _stop_world_start = 0.0;
806 phase_times()->record_cur_collection_start_sec(start_time_sec);
807 _cur_collection_pause_used_at_start_bytes = start_used;
808 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
809 _pending_cards = _g1->pending_card_num();
811 _collection_set_bytes_used_before = 0;
812 _bytes_copied_during_gc = 0;
814 YoungList* young_list = _g1->young_list();
815 _eden_bytes_before_gc = young_list->eden_used_bytes();
816 _survivor_bytes_before_gc = young_list->survivor_used_bytes();
817 _capacity_before_gc = _g1->capacity();
819 _last_gc_was_young = false;
821 // do that for any other surv rate groups
822 _short_lived_surv_rate_group->stop_adding_regions();
823 _survivors_age_table.clear();
825 assert( verify_young_ages(), "region age verification" );
826 }
828 void G1CollectorPolicy::record_concurrent_mark_init_end(double
829 mark_init_elapsed_time_ms) {
830 _during_marking = true;
831 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
832 clear_during_initial_mark_pause();
833 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
834 }
836 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
837 _mark_remark_start_sec = os::elapsedTime();
838 _during_marking = false;
839 }
841 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
842 double end_time_sec = os::elapsedTime();
843 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
844 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
845 _cur_mark_stop_world_time_ms += elapsed_time_ms;
846 _prev_collection_pause_end_ms += elapsed_time_ms;
848 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
849 }
851 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
852 _mark_cleanup_start_sec = os::elapsedTime();
853 }
855 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
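// Flag the next young GC as the "last young GC" of this cycle; at the end of
// that pause we decide whether to switch to mixed GCs
// (see record_collection_pause_end()).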
856 _last_young_gc = true;
857 _in_marking_window = false;
858 }
860 void G1CollectorPolicy::record_concurrent_pause() {
861 if (_stop_world_start > 0.0) {
862 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
863 _trace_gen0_time_data.record_yield_time(yield_ms);
864 }
865 }
867 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
868 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
869 return false;
870 }
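// The initiating threshold is InitiatingHeapOccupancyPercent of the current
// heap capacity, compared against the occupancy of the non-young regions plus
// the size of the allocation that triggered this check.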
872 size_t marking_initiating_used_threshold =
873 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
874 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
875 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
877 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
878 if (gcs_are_young()) {
879 ergo_verbose5(ErgoConcCycles,
880 "request concurrent cycle initiation",
881 ergo_format_reason("occupancy higher than threshold")
882 ergo_format_byte("occupancy")
883 ergo_format_byte("allocation request")
884 ergo_format_byte_perc("threshold")
885 ergo_format_str("source"),
886 cur_used_bytes,
887 alloc_byte_size,
888 marking_initiating_used_threshold,
889 (double) InitiatingHeapOccupancyPercent,
890 source);
891 return true;
892 } else {
893 ergo_verbose5(ErgoConcCycles,
894 "do not request concurrent cycle initiation",
895 ergo_format_reason("still doing mixed collections")
896 ergo_format_byte("occupancy")
897 ergo_format_byte("allocation request")
898 ergo_format_byte_perc("threshold")
899 ergo_format_str("source"),
900 cur_used_bytes,
901 alloc_byte_size,
902 marking_initiating_used_threshold,
903 (double) InitiatingHeapOccupancyPercent,
904 source);
905 }
906 }
908 return false;
909 }
911 // Anything below that is considered to be zero
912 #define MIN_TIMER_GRANULARITY 0.0000001
914 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
915 double end_time_sec = os::elapsedTime();
916 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
917 "otherwise, the subtraction below does not make sense");
918 size_t rs_size =
919 _cur_collection_pause_used_regions_at_start - cset_region_length();
920 size_t cur_used_bytes = _g1->used();
921 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
922 bool last_pause_included_initial_mark = false;
923 bool update_stats = !_g1->evacuation_failed();
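// If the evacuation failed, the timings of this pause are not representative,
// so the prediction statistics below are not updated.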
925 #ifndef PRODUCT
926 if (G1YoungSurvRateVerbose) {
927 gclog_or_tty->print_cr("");
928 _short_lived_surv_rate_group->print();
929 // do that for any other surv rate groups too
930 }
931 #endif // PRODUCT
933 last_pause_included_initial_mark = during_initial_mark_pause();
934 if (last_pause_included_initial_mark) {
935 record_concurrent_mark_init_end(0.0);
936 } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
937 // Note: this might have already been set, if during the last
938 // pause we decided to start a cycle but at the beginning of
939 // this pause we decided to postpone it. That's OK.
940 set_initiate_conc_mark_if_possible();
941 }
943 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
944 end_time_sec, false);
946 size_t freed_bytes =
947 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
948 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
950 double survival_fraction =
951 (double)surviving_bytes/
952 (double)_collection_set_bytes_used_before;
954 if (update_stats) {
955 _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
956 // this is where we update the allocation rate of the application
957 double app_time_ms =
958 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
959 if (app_time_ms < MIN_TIMER_GRANULARITY) {
960 // This usually happens due to the timer not having the required
961 // granularity. Some Linuxes are the usual culprits.
962 // We'll just set it to something (arbitrarily) small.
963 app_time_ms = 1.0;
964 }
965 // We maintain the invariant that all objects allocated by mutator
966 // threads will be allocated out of eden regions. So, we can use
967 // the eden region number allocated since the previous GC to
968 // calculate the application's allocate rate. The only exception
969 // to that is humongous objects that are allocated separately. But
970 // given that humongous object allocations do not really affect
971 // either the pause's duration or when the next pause will take
972 // place we can safely ignore them here.
973 uint regions_allocated = eden_cset_region_length();
974 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
975 _alloc_rate_ms_seq->add(alloc_rate_ms);
977 double interval_ms =
978 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
979 update_recent_gc_times(end_time_sec, pause_time_ms);
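// Ratio of recent GC pause time to total elapsed time over the same window;
// expansion_amount() compares this (as a percentage) against the
// GCTimeRatio-derived overhead threshold when deciding whether to grow the heap.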
980 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
981 if (recent_avg_pause_time_ratio() < 0.0 ||
982 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
983 #ifndef PRODUCT
984 // Dump info to allow post-facto debugging
985 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
986 gclog_or_tty->print_cr("-------------------------------------------");
987 gclog_or_tty->print_cr("Recent GC Times (ms):");
988 _recent_gc_times_ms->dump();
989 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
990 _recent_prev_end_times_for_all_gcs_sec->dump();
991 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
992 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
993 // In debug mode, terminate the JVM if the user wants to debug at this point.
994 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
995 #endif // !PRODUCT
996 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
997 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
998 if (_recent_avg_pause_time_ratio < 0.0) {
999 _recent_avg_pause_time_ratio = 0.0;
1000 } else {
1001 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1002 _recent_avg_pause_time_ratio = 1.0;
1003 }
1004 }
1005 }
1006 bool new_in_marking_window = _in_marking_window;
1007 bool new_in_marking_window_im = false;
1008 if (during_initial_mark_pause()) {
1009 new_in_marking_window = true;
1010 new_in_marking_window_im = true;
1011 }
1013 if (_last_young_gc) {
1014 // This is supposed to be the "last young GC" before we start
1015 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1017 if (!last_pause_included_initial_mark) {
1018 if (next_gc_should_be_mixed("start mixed GCs",
1019 "do not start mixed GCs")) {
1020 set_gcs_are_young(false);
1021 }
1022 } else {
1023 ergo_verbose0(ErgoMixedGCs,
1024 "do not start mixed GCs",
1025 ergo_format_reason("concurrent cycle is about to start"));
1026 }
1027 _last_young_gc = false;
1028 }
1030 if (!_last_gc_was_young) {
1031 // This is a mixed GC. Here we decide whether to continue doing
1032 // mixed GCs or not.
1034 if (!next_gc_should_be_mixed("continue mixed GCs",
1035 "do not continue mixed GCs")) {
1036 set_gcs_are_young(true);
1037 }
1038 }
1040 _short_lived_surv_rate_group->start_adding_regions();
1041 // do that for any other surv rate groups
1043 if (update_stats) {
1044 double cost_per_card_ms = 0.0;
1045 if (_pending_cards > 0) {
1046 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
1047 _cost_per_card_ms_seq->add(cost_per_card_ms);
1048 }
1050 size_t cards_scanned = _g1->cards_scanned();
1052 double cost_per_entry_ms = 0.0;
1053 if (cards_scanned > 10) {
1054 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
1055 if (_last_gc_was_young) {
1056 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1057 } else {
1058 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1059 }
1060 }
1062 if (_max_rs_lengths > 0) {
1063 double cards_per_entry_ratio =
1064 (double) cards_scanned / (double) _max_rs_lengths;
1065 if (_last_gc_was_young) {
1066 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1067 } else {
1068 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1069 }
1070 }
1072 // This is defensive. For a while _max_rs_lengths could get
1073 // smaller than _recorded_rs_lengths which was causing
1074 // rs_length_diff to get very large and mess up the RSet length
1075 // predictions. The reason was unsafe concurrent updates to the
1076 // _inc_cset_recorded_rs_lengths field which the code below guards
1077 // against (see CR 7118202). This bug has now been fixed (see CR
1078 // 7119027). However, I'm still worried that
1079 // _inc_cset_recorded_rs_lengths might still end up somewhat
1080 // inaccurate. The concurrent refinement thread calculates an
1081 // RSet's length concurrently with other CR threads updating it
1082 // which might cause it to calculate the length incorrectly (if,
1083 // say, it's in mid-coarsening). So I'll leave in the defensive
1084 // conditional below just in case.
1085 size_t rs_length_diff = 0;
1086 if (_max_rs_lengths > _recorded_rs_lengths) {
1087 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1088 }
1089 _rs_length_diff_seq->add((double) rs_length_diff);
1091 size_t copied_bytes = surviving_bytes;
1092 double cost_per_byte_ms = 0.0;
1093 if (copied_bytes > 0) {
1094 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
1095 if (_in_marking_window) {
1096 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1097 } else {
1098 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1099 }
1100 }
1102 double all_other_time_ms = pause_time_ms -
1103 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
1104 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
1106 double young_other_time_ms = 0.0;
1107 if (young_cset_region_length() > 0) {
1108 young_other_time_ms =
1109 phase_times()->young_cset_choice_time_ms() +
1110 phase_times()->young_free_cset_time_ms();
1111 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1112 (double) young_cset_region_length());
1113 }
1114 double non_young_other_time_ms = 0.0;
1115 if (old_cset_region_length() > 0) {
1116 non_young_other_time_ms =
1117 phase_times()->non_young_cset_choice_time_ms() +
1118 phase_times()->non_young_free_cset_time_ms();
1120 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1121 (double) old_cset_region_length());
1122 }
1124 double constant_other_time_ms = all_other_time_ms -
1125 (young_other_time_ms + non_young_other_time_ms);
1126 _constant_other_time_ms_seq->add(constant_other_time_ms);
1128 double survival_ratio = 0.0;
1129 if (_collection_set_bytes_used_before > 0) {
1130 survival_ratio = (double) _bytes_copied_during_gc /
1131 (double) _collection_set_bytes_used_before;
1132 }
1134 _pending_cards_seq->add((double) _pending_cards);
1135 _rs_lengths_seq->add((double) _max_rs_lengths);
1136 }
1138 _in_marking_window = new_in_marking_window;
1139 _in_marking_window_im = new_in_marking_window_im;
1140 _free_regions_at_end_of_collection = _g1->free_regions();
1141 update_young_list_target_length();
1143 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1144 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1145 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
1146 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
1148 _collectionSetChooser->verify();
1149 }
1151 #define EXT_SIZE_FORMAT "%.1f%s"
1152 #define EXT_SIZE_PARAMS(bytes) \
1153 byte_size_in_proper_unit((double)(bytes)), \
1154 proper_unit_for_byte_size((bytes))
1156 void G1CollectorPolicy::print_heap_transition() {
1157 _g1->print_size_transition(gclog_or_tty,
1158 _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
1159 }
1161 void G1CollectorPolicy::print_detailed_heap_transition() {
1162 YoungList* young_list = _g1->young_list();
1163 size_t eden_bytes = young_list->eden_used_bytes();
1164 size_t survivor_bytes = young_list->survivor_used_bytes();
1165 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1166 size_t used = _g1->used();
1167 size_t capacity = _g1->capacity();
1168 size_t eden_capacity =
1169 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1171 gclog_or_tty->print_cr(
1172 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
1173 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1174 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1175 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1176 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1177 EXT_SIZE_PARAMS(_prev_eden_capacity),
1178 EXT_SIZE_PARAMS(eden_bytes),
1179 EXT_SIZE_PARAMS(eden_capacity),
1180 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1181 EXT_SIZE_PARAMS(survivor_bytes),
1182 EXT_SIZE_PARAMS(used_before_gc),
1183 EXT_SIZE_PARAMS(_capacity_before_gc),
1184 EXT_SIZE_PARAMS(used),
1185 EXT_SIZE_PARAMS(capacity));
1187 _prev_eden_capacity = eden_capacity;
1188 }
1190 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1191 double update_rs_processed_buffers,
1192 double goal_ms) {
1193 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1194 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1196 if (G1UseAdaptiveConcRefinement) {
1197 const int k_gy = 3, k_gr = 6;
1198 const double inc_k = 1.1, dec_k = 0.9;
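// Resize the refinement zones around the green zone: roughly, the green zone
// is the number of completed buffers refinement aims to leave for the next GC,
// the yellow zone progressively activates more refinement threads, and beyond
// the red zone mutators are made to process buffers themselves.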
1200 int g = cg1r->green_zone();
1201 if (update_rs_time > goal_ms) {
1202 g = (int)(g * dec_k); // Can become 0; that's OK. That would mean mutator-only processing.
1203 } else {
1204 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1205 g = (int)MAX2(g * inc_k, g + 1.0);
1206 }
1207 }
1208 // Change the refinement threads params
1209 cg1r->set_green_zone(g);
1210 cg1r->set_yellow_zone(g * k_gy);
1211 cg1r->set_red_zone(g * k_gr);
1212 cg1r->reinitialize_threads();
1214 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1215 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1216 cg1r->yellow_zone());
1217 // Change the barrier params
1218 dcqs.set_process_completed_threshold(processing_threshold);
1219 dcqs.set_max_completed_queue(cg1r->red_zone());
1220 }
1222 int curr_queue_size = dcqs.completed_buffers_num();
1223 if (curr_queue_size >= cg1r->yellow_zone()) {
1224 dcqs.set_completed_queue_padding(curr_queue_size);
1225 } else {
1226 dcqs.set_completed_queue_padding(0);
1227 }
1228 dcqs.notify_if_necessary();
1229 }
1231 double
1232 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1233 size_t scanned_cards) {
1234 return
1235 predict_rs_update_time_ms(pending_cards) +
1236 predict_rs_scan_time_ms(scanned_cards) +
1237 predict_constant_other_time_ms();
1238 }
1240 double
1241 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1242 size_t rs_length = predict_rs_length_diff();
1243 size_t card_num;
1244 if (gcs_are_young()) {
1245 card_num = predict_young_card_num(rs_length);
1246 } else {
1247 card_num = predict_non_young_card_num(rs_length);
1248 }
1249 return predict_base_elapsed_time_ms(pending_cards, card_num);
1250 }
1252 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
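// For regions with marking information, use the live data recorded by the
// last marking; for young regions, predict the survivors from the region's
// survivor rate group.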
1253 size_t bytes_to_copy;
1254 if (hr->is_marked())
1255 bytes_to_copy = hr->max_live_bytes();
1256 else {
1257 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1258 int age = hr->age_in_surv_rate_group();
1259 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1260 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1261 }
1262 return bytes_to_copy;
1263 }
1265 double
1266 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1267 bool for_young_gc) {
1268 size_t rs_length = hr->rem_set()->occupied();
1269 size_t card_num;
1271 // Predicting the number of cards is based on which type of GC
1272 // we're predicting for.
1273 if (for_young_gc) {
1274 card_num = predict_young_card_num(rs_length);
1275 } else {
1276 card_num = predict_non_young_card_num(rs_length);
1277 }
1278 size_t bytes_to_copy = predict_bytes_to_copy(hr);
1280 double region_elapsed_time_ms =
1281 predict_rs_scan_time_ms(card_num) +
1282 predict_object_copy_time_ms(bytes_to_copy);
1284 // The prediction of the "other" time for this region is based
1285 // upon the region type and NOT the GC type.
1286 if (hr->is_young()) {
1287 region_elapsed_time_ms += predict_young_other_time_ms(1);
1288 } else {
1289 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1290 }
1291 return region_elapsed_time_ms;
1292 }
1294 void
1295 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
1296 uint survivor_cset_region_length) {
1297 _eden_cset_region_length = eden_cset_region_length;
1298 _survivor_cset_region_length = survivor_cset_region_length;
1299 _old_cset_region_length = 0;
1300 }
1302 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1303 _recorded_rs_lengths = rs_lengths;
1304 }
1306 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1307 double elapsed_ms) {
1308 _recent_gc_times_ms->add(elapsed_ms);
1309 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1310 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1311 }
1313 size_t G1CollectorPolicy::expansion_amount() {
1314 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1315 double threshold = _gc_overhead_perc;
1316 if (recent_gc_overhead > threshold) {
1317 // We will double the existing space, or take
1318 // G1ExpandByPercentOfAvailable % of the available expansion
1319 // space, whichever is smaller, bounded below by a minimum
1320 // expansion (unless that's all that's left.)
1321 const size_t min_expand_bytes = 1*M;
1322 size_t reserved_bytes = _g1->max_capacity();
1323 size_t committed_bytes = _g1->capacity();
1324 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1325 size_t expand_bytes;
1326 size_t expand_bytes_via_pct =
1327 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1328 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1329 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1330 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1332 ergo_verbose5(ErgoHeapSizing,
1333 "attempt heap expansion",
1334 ergo_format_reason("recent GC overhead higher than "
1335 "threshold after GC")
1336 ergo_format_perc("recent GC overhead")
1337 ergo_format_perc("threshold")
1338 ergo_format_byte("uncommitted")
1339 ergo_format_byte_perc("calculated expansion amount"),
1340 recent_gc_overhead, threshold,
1341 uncommitted_bytes,
1342 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1344 return expand_bytes;
1345 } else {
1346 return 0;
1347 }
1348 }
1350 void G1CollectorPolicy::print_tracing_info() const {
1351 _trace_gen0_time_data.print();
1352 _trace_gen1_time_data.print();
1353 }
1355 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1356 #ifndef PRODUCT
1357 _short_lived_surv_rate_group->print_surv_rate_summary();
1358 // add this call for any other surv rate groups
1359 #endif // PRODUCT
1360 }
1362 #ifndef PRODUCT
1363 // for debugging, bit of a hack...
1364 static char*
1365 region_num_to_mbs(int length) {
1366 static char buffer[64];
1367 double bytes = (double) (length * HeapRegion::GrainBytes);
1368 double mbs = bytes / (double) (1024 * 1024);
1369 sprintf(buffer, "%7.2lfMB", mbs);
1370 return buffer;
1371 }
1372 #endif // PRODUCT
1374 uint G1CollectorPolicy::max_regions(int purpose) {
1375 switch (purpose) {
1376 case GCAllocForSurvived:
1377 return _max_survivor_regions;
1378 case GCAllocForTenured:
1379 return REGIONS_UNLIMITED;
1380 default:
1381 ShouldNotReachHere();
1382 return REGIONS_UNLIMITED;
1383 };
1384 }
1386 void G1CollectorPolicy::update_max_gc_locker_expansion() {
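// Allow the young list to grow somewhat beyond its target while a pause is
// held off by the GC locker; the slack is GCLockerEdenExpansionPercent of the
// target length.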
1387 uint expansion_region_num = 0;
1388 if (GCLockerEdenExpansionPercent > 0) {
1389 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1390 double expansion_region_num_d = perc * (double) _young_list_target_length;
1391 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1392 // less than 1.0) we'll get 1.
1393 expansion_region_num = (uint) ceil(expansion_region_num_d);
1394 } else {
1395 assert(expansion_region_num == 0, "sanity");
1396 }
1397 _young_list_max_length = _young_list_target_length + expansion_region_num;
1398 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1399 }
1401 // Calculates survivor space parameters.
1402 void G1CollectorPolicy::update_survivors_policy() {
1403 double max_survivor_regions_d =
1404 (double) _young_list_target_length / (double) SurvivorRatio;
1405 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1406 // smaller than 1.0) we'll get 1.
1407 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1409 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1410 HeapRegion::GrainWords * _max_survivor_regions);
1411 }
1413 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1414 GCCause::Cause gc_cause) {
1415 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1416 if (!during_cycle) {
1417 ergo_verbose1(ErgoConcCycles,
1418 "request concurrent cycle initiation",
1419 ergo_format_reason("requested by GC cause")
1420 ergo_format_str("GC cause"),
1421 GCCause::to_string(gc_cause));
1422 set_initiate_conc_mark_if_possible();
1423 return true;
1424 } else {
1425 ergo_verbose1(ErgoConcCycles,
1426 "do not request concurrent cycle initiation",
1427 ergo_format_reason("concurrent cycle already in progress")
1428 ergo_format_str("GC cause"),
1429 GCCause::to_string(gc_cause));
1430 return false;
1431 }
1432 }
1434 void
1435 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1436 // We are about to decide on whether this pause will be an
1437 // initial-mark pause.
1439 // First, during_initial_mark_pause() should not be already set. We
1440 // will set it here if we have to. However, it should be cleared by
1441 // the end of the pause (it's only set for the duration of an
1442 // initial-mark pause).
1443 assert(!during_initial_mark_pause(), "pre-condition");
1445 if (initiate_conc_mark_if_possible()) {
1446 // We had noticed on a previous pause that the heap occupancy has
1447 // gone over the initiating threshold and we should start a
1448 // concurrent marking cycle. So we might initiate one.
1450 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1451 if (!during_cycle) {
1452 // The concurrent marking thread is not "during a cycle", i.e.,
1453 // it has completed the last one. So we can go ahead and
1454 // initiate a new cycle.
1456 set_during_initial_mark_pause();
1457 // We do not allow mixed GCs during marking.
1458 if (!gcs_are_young()) {
1459 set_gcs_are_young(true);
1460 ergo_verbose0(ErgoMixedGCs,
1461 "end mixed GCs",
1462 ergo_format_reason("concurrent cycle is about to start"));
1463 }
1465 // And we can now clear initiate_conc_mark_if_possible() as
1466 // we've already acted on it.
1467 clear_initiate_conc_mark_if_possible();
1469 ergo_verbose0(ErgoConcCycles,
1470 "initiate concurrent cycle",
1471 ergo_format_reason("concurrent cycle initiation requested"));
1472 } else {
1473 // The concurrent marking thread is still finishing up the
1474 // previous cycle. If we started one right now, the two cycles
1475 // would overlap. In particular, the concurrent marking thread might
1476 // be in the process of clearing the next marking bitmap (which
1477 // we will use for the next cycle if we start one). Starting a
1478 // cycle now will be bad given that parts of the marking
1479 // information might get cleared by the marking thread. And we
1480 // cannot wait for the marking thread to finish the cycle as it
1481 // periodically yields while clearing the next marking bitmap
1482 // and, if it's in a yield point, it's waiting for us to
1483 // finish. So, at this point we will not start a cycle and we'll
1484 // let the concurrent marking thread complete the last one.
1485 ergo_verbose0(ErgoConcCycles,
1486 "do not initiate concurrent cycle",
1487 ergo_format_reason("concurrent cycle already in progress"));
1488 }
1489 }
1490 }
1492 class KnownGarbageClosure: public HeapRegionClosure {
1493 G1CollectedHeap* _g1h;
1494 CollectionSetChooser* _hrSorted;
1496 public:
1497 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1498 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1500 bool doHeapRegion(HeapRegion* r) {
1501 // We only include humongous regions in collection
1502 // sets when concurrent mark shows that their contained object is
1503 // unreachable.
1505 // Do we have any marking information for this region?
1506 if (r->is_marked()) {
1507 // We will skip any region that's currently used as an old GC
1508 // alloc region (we should not consider those for collection
1509 // before we fill them up).
1510 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1511 _hrSorted->add_region(r);
1512 }
1513 }
1514 return false;
1515 }
1516 };
1518 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1519 G1CollectedHeap* _g1h;
1520 CSetChooserParUpdater _cset_updater;
1522 public:
1523 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1524 uint chunk_size) :
1525 _g1h(G1CollectedHeap::heap()),
1526 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1528 bool doHeapRegion(HeapRegion* r) {
1529 // Do we have any marking information for this region?
1530 if (r->is_marked()) {
1531 // We will skip any region that's currently used as an old GC
1532 // alloc region (we should not consider those for collection
1533 // before we fill them up).
1534 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1535 _cset_updater.add_region(r);
1536 }
1537 }
1538 return false;
1539 }
1540 };
1542 class ParKnownGarbageTask: public AbstractGangTask {
1543 CollectionSetChooser* _hrSorted;
1544 uint _chunk_size;
1545 G1CollectedHeap* _g1;
1546 public:
1547 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
1548 AbstractGangTask("ParKnownGarbageTask"),
1549 _hrSorted(hrSorted), _chunk_size(chunk_size),
1550 _g1(G1CollectedHeap::heap()) { }
1552 void work(uint worker_id) {
1553 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1555 // Back to zero for the claim value.
1556 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
1557 _g1->workers()->active_workers(),
1558 HeapRegion::InitialClaimValue);
1559 }
1560 };
1562 void
1563 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1564 _collectionSetChooser->clear();
1566 uint region_num = _g1->n_regions();
1567 if (G1CollectedHeap::use_parallel_gc_threads()) {
1568 const uint OverpartitionFactor = 4;
1569 uint WorkUnit;
1570 // The use of MinChunkSize = 8 in the original code
1571 // causes some assertion failures when the total number of
1572 // regions is less than 8. The code here tries to fix that.
1573 // Should the original code also be fixed?
1574 if (no_of_gc_threads > 0) {
1575 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1576 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1577 MinWorkUnit);
1578 } else {
1579 assert(no_of_gc_threads > 0,
1580 "The active gc workers should be greater than 0");
1581 // In a product build do something reasonable to avoid a crash.
1582 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1583 WorkUnit =
1584 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1585 MinWorkUnit);
1586 }
1587 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
1588 WorkUnit);
1589 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
1590 (int) WorkUnit);
1591 _g1->workers()->run_task(&parKnownGarbageTask);
1593 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1594 "sanity check");
1595 } else {
1596 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
1597 _g1->heap_region_iterate(&knownGarbagecl);
1598 }
1600 _collectionSetChooser->sort_regions();
1602 double end_sec = os::elapsedTime();
1603 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1604 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1605 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1606 _prev_collection_pause_end_ms += elapsed_time_ms;
1607 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1608 }
1610 // Add the heap region at the head of the non-incremental collection set
1611 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1612 assert(_inc_cset_build_state == Active, "Precondition");
1613 assert(!hr->is_young(), "non-incremental add of young region");
1615 assert(!hr->in_collection_set(), "should not already be in the CSet");
1616 hr->set_in_collection_set(true);
1617 hr->set_next_in_collection_set(_collection_set);
1618 _collection_set = hr;
1619 _collection_set_bytes_used_before += hr->used();
1620 _g1->register_region_with_in_cset_fast_test(hr);
1621 size_t rs_length = hr->rem_set()->occupied();
1622 _recorded_rs_lengths += rs_length;
1623 _old_cset_region_length += 1;
1624 }
1626 // Initialize the per-collection-set information
1627 void G1CollectorPolicy::start_incremental_cset_building() {
1628 assert(_inc_cset_build_state == Inactive, "Precondition");
1630 _inc_cset_head = NULL;
1631 _inc_cset_tail = NULL;
1632 _inc_cset_bytes_used_before = 0;
1634 _inc_cset_max_finger = 0;
1635 _inc_cset_recorded_rs_lengths = 0;
1636 _inc_cset_recorded_rs_lengths_diffs = 0;
1637 _inc_cset_predicted_elapsed_time_ms = 0.0;
1638 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1639 _inc_cset_build_state = Active;
1640 }
1642 void G1CollectorPolicy::finalize_incremental_cset_building() {
1643 assert(_inc_cset_build_state == Active, "Precondition");
1644 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1646 // The two "main" fields, _inc_cset_recorded_rs_lengths and
1647 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1648 // that adds a new region to the CSet. Further updates by the
1649 // concurrent refinement thread that samples the young RSet lengths
1650 // are accumulated in the *_diffs fields. Here we add the diffs to
1651 // the "main" fields.
1653 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1654 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1655 } else {
1656 // This is defensive. The diff should in theory always be positive
1657 // as RSets can only grow between GCs. However, given that we
1658 // sample their size concurrently with other threads updating them
1659 // it's possible that we might get the wrong size back, which
1660 // could make the calculations somewhat inaccurate.
1661 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1662 if (_inc_cset_recorded_rs_lengths >= diffs) {
1663 _inc_cset_recorded_rs_lengths -= diffs;
1664 } else {
1665 _inc_cset_recorded_rs_lengths = 0;
1666 }
1667 }
1668 _inc_cset_predicted_elapsed_time_ms +=
1669 _inc_cset_predicted_elapsed_time_ms_diffs;
1671 _inc_cset_recorded_rs_lengths_diffs = 0;
1672 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1673 }
1675 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1676 // This routine is used when:
1677 // * adding survivor regions to the incremental cset at the end of an
1678 // evacuation pause,
1679 // * adding the current allocation region to the incremental cset
1680 // when it is retired, and
1681 // * updating existing policy information for a region in the
1682 // incremental cset via young list RSet sampling.
1683 // Therefore this routine may be called at a safepoint by the
1684 // VM thread, or in-between safepoints by mutator threads (when
1685 // retiring the current allocation region) or a concurrent
1686 // refine thread (RSet sampling).
1688 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1689 size_t used_bytes = hr->used();
1690 _inc_cset_recorded_rs_lengths += rs_length;
1691 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1692 _inc_cset_bytes_used_before += used_bytes;
1694 // Cache the values we have added to the aggregated information
1695 // in the heap region in case we have to remove this region from
1696 // the incremental collection set, or it is updated by the
1697 // RSet sampling code.
1698 hr->set_recorded_rs_length(rs_length);
1699 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1700 }
1702 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1703 size_t new_rs_length) {
1704 // Update the CSet information that is dependent on the new RS length
1705 assert(hr->is_young(), "Precondition");
1706 assert(!SafepointSynchronize::is_at_safepoint(),
1707 "should not be at a safepoint");
1709 // We could have updated _inc_cset_recorded_rs_lengths and
1710 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1711 // that atomically, as this code is executed by a concurrent
1712 // refinement thread, potentially concurrently with a mutator thread
1713 // allocating a new region and also updating the same fields. To
1714 // avoid the atomic operations we accumulate these updates on two
1715 // separate fields (*_diffs) and we'll just add them to the "main"
1716 // fields at the start of a GC.
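// Illustrative example (assumed values): if this region was added with a
// recorded RS length of 10 and the sampler now observes 14 occupied
// entries, rs_lengths_diff below is +4; it is accumulated in the *_diffs
// field and folded into _inc_cset_recorded_rs_lengths at the start of the
// next GC (see finalize_incremental_cset_building()).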
1718 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1719 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1720 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1722 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1723 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1724 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1725 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1727 hr->set_recorded_rs_length(new_rs_length);
1728 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1729 }
1731 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1732 assert(hr->is_young(), "invariant");
1733 assert(hr->young_index_in_cset() > -1, "should have already been set");
1734 assert(_inc_cset_build_state == Active, "Precondition");
1736 // We need to clear and set the recorded/cached collection set
1737 // information in the heap region here (before the region gets added
1738 // to the collection set). An individual heap region's cached values
1739 // are calculated, aggregated with the policy collection set info,
1740 // and cached in the heap region here (initially) and (subsequently)
1741 // by the Young List sampling code.
1743 size_t rs_length = hr->rem_set()->occupied();
1744 add_to_incremental_cset_info(hr, rs_length);
1746 HeapWord* hr_end = hr->end();
1747 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1749 assert(!hr->in_collection_set(), "invariant");
1750 hr->set_in_collection_set(true);
1751 assert( hr->next_in_collection_set() == NULL, "invariant");
1753 _g1->register_region_with_in_cset_fast_test(hr);
1754 }
1756 // Add the region at the RHS of the incremental cset
1757 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1758 // We should only ever be appending survivors at the end of a pause
1759 assert( hr->is_survivor(), "Logic");
1761 // Do the 'common' stuff
1762 add_region_to_incremental_cset_common(hr);
1764 // Now add the region at the right hand side
1765 if (_inc_cset_tail == NULL) {
1766 assert(_inc_cset_head == NULL, "invariant");
1767 _inc_cset_head = hr;
1768 } else {
1769 _inc_cset_tail->set_next_in_collection_set(hr);
1770 }
1771 _inc_cset_tail = hr;
1772 }
1774 // Add the region to the LHS of the incremental cset
1775 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1776 // Survivors should be added to the RHS at the end of a pause
1777 assert(!hr->is_survivor(), "Logic");
1779 // Do the 'common' stuff
1780 add_region_to_incremental_cset_common(hr);
1782 // Add the region at the left hand side
1783 hr->set_next_in_collection_set(_inc_cset_head);
1784 if (_inc_cset_head == NULL) {
1785 assert(_inc_cset_tail == NULL, "Invariant");
1786 _inc_cset_tail = hr;
1787 }
1788 _inc_cset_head = hr;
1789 }
1791 #ifndef PRODUCT
1792 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
1793 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
1795 st->print_cr("\nCollection_set:");
1796 HeapRegion* csr = list_head;
1797 while (csr != NULL) {
1798 HeapRegion* next = csr->next_in_collection_set();
1799 assert(csr->in_collection_set(), "bad CS");
1800 st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
1801 HR_FORMAT_PARAMS(csr),
1802 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
1803 csr->age_in_surv_rate_group_cond());
1804 csr = next;
1805 }
1806 }
1807 #endif // !PRODUCT
1809 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
1810 // Returns the given amount of reclaimable bytes (that represents
1811 // the amount of reclaimable space still to be collected) as a
1812 // percentage of the current heap capacity.
1813 size_t capacity_bytes = _g1->capacity();
1814 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1815 }
1817 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1818 const char* false_action_str) {
1819 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1820 if (cset_chooser->is_empty()) {
1821 ergo_verbose0(ErgoMixedGCs,
1822 false_action_str,
1823 ergo_format_reason("candidate old regions not available"));
1824 return false;
1825 }
1827 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1828 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1829 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1830 double threshold = (double) G1HeapWastePercent;
1831 if (reclaimable_perc <= threshold) {
1832 ergo_verbose4(ErgoMixedGCs,
1833 false_action_str,
1834 ergo_format_reason("reclaimable percentage not over threshold")
1835 ergo_format_region("candidate old regions")
1836 ergo_format_byte_perc("reclaimable")
1837 ergo_format_perc("threshold"),
1838 cset_chooser->remaining_regions(),
1839 reclaimable_bytes,
1840 reclaimable_perc, threshold);
1841 return false;
1842 }
1844 ergo_verbose4(ErgoMixedGCs,
1845 true_action_str,
1846 ergo_format_reason("candidate old regions available")
1847 ergo_format_region("candidate old regions")
1848 ergo_format_byte_perc("reclaimable")
1849 ergo_format_perc("threshold"),
1850 cset_chooser->remaining_regions(),
1851 reclaimable_bytes,
1852 reclaimable_perc, threshold);
1853 return true;
1854 }
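// Worked example for next_gc_should_be_mixed() (illustrative values): with
// a 4 GB heap capacity and 300 MB of remaining reclaimable space in the
// CSet chooser, reclaimable_perc is about 7.3%; with G1HeapWastePercent = 5
// that is above the threshold, so another mixed GC would be requested.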
1856 uint G1CollectorPolicy::calc_min_old_cset_length() {
1857 // The min old CSet region bound is based on the maximum desired
1858 // number of mixed GCs after a cycle. I.e., even if some old regions
1859 // look expensive, we should add them to the CSet anyway to make
1860 // sure we go through the available old regions in no more than the
1861 // maximum desired number of mixed GCs.
1862 //
1863 // The calculation is based on the number of marked regions we added
1864 // to the CSet chooser in the first place, not how many remain, so
1865 // that the result is the same during all mixed GCs that follow a cycle.
1867 const size_t region_num = (size_t) _collectionSetChooser->length();
1868 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1869 size_t result = region_num / gc_num;
1870 // emulate ceiling
1871 if (result * gc_num < region_num) {
1872 result += 1;
1873 }
1874 return (uint) result;
1875 }
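// Worked example (illustrative values): if 100 regions were added to the
// CSet chooser after marking and G1MixedGCCountTarget = 8, the minimum old
// CSet length is ceil(100 / 8) = 13 regions per mixed GC.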
1877 uint G1CollectorPolicy::calc_max_old_cset_length() {
1878 // The max old CSet region bound is based on the threshold expressed
1879 // as a percentage of the heap size. I.e., it should bound the
1880 // number of old regions added to the CSet irrespective of how many
1881 // of them are available.
1883 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1884 const size_t region_num = g1h->n_regions();
1885 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1886 size_t result = region_num * perc / 100;
1887 // emulate ceiling
1888 if (100 * result < region_num * perc) {
1889 result += 1;
1890 }
1891 return (uint) result;
1892 }
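// Worked example (illustrative values): with 2048 regions in the heap and
// G1OldCSetRegionThresholdPercent = 10, the maximum old CSet length is
// ceil(2048 * 10 / 100) = ceil(204.8) = 205 regions per mixed GC.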
1895 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
1896 double young_start_time_sec = os::elapsedTime();
1898 YoungList* young_list = _g1->young_list();
1899 finalize_incremental_cset_building();
1901 guarantee(target_pause_time_ms > 0.0,
1902 err_msg("target_pause_time_ms = %1.6lf should be positive",
1903 target_pause_time_ms));
1904 guarantee(_collection_set == NULL, "Precondition");
1906 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1907 double predicted_pause_time_ms = base_time_ms;
1908 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
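// Illustrative example (assumed values): with a 200 ms pause target and a
// predicted base time of 30 ms, 170 ms remain for adding young and old
// regions to the CSet below.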
1910 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1911 "start choosing CSet",
1912 ergo_format_size("_pending_cards")
1913 ergo_format_ms("predicted base time")
1914 ergo_format_ms("remaining time")
1915 ergo_format_ms("target pause time"),
1916 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1918 _last_gc_was_young = gcs_are_young();
1920 if (_last_gc_was_young) {
1921 _trace_gen0_time_data.increment_young_collection_count();
1922 } else {
1923 _trace_gen0_time_data.increment_mixed_collection_count();
1924 }
1926 // The young list is laid out with the survivor regions from the previous
1927 // pause appended to the RHS of the young list, i.e.
1928 // [Newly Young Regions ++ Survivors from last pause].
1930 uint survivor_region_length = young_list->survivor_length();
1931 uint eden_region_length = young_list->length() - survivor_region_length;
1932 init_cset_region_lengths(eden_region_length, survivor_region_length);
1934 HeapRegion* hr = young_list->first_survivor_region();
1935 while (hr != NULL) {
1936 assert(hr->is_survivor(), "badly formed young list");
1937 hr->set_young();
1938 hr = hr->get_next_young_region();
1939 }
1941 // Clear the fields that point to the survivor list - they are all young now.
1942 young_list->clear_survivors();
1944 _collection_set = _inc_cset_head;
1945 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1946 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
1947 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1949 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1950 "add young regions to CSet",
1951 ergo_format_region("eden")
1952 ergo_format_region("survivors")
1953 ergo_format_ms("predicted young region time"),
1954 eden_region_length, survivor_region_length,
1955 _inc_cset_predicted_elapsed_time_ms);
1957 // The number of recorded young regions is the incremental
1958 // collection set's current size
1959 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1961 double young_end_time_sec = os::elapsedTime();
1962 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1964 // Set the start of the non-young choice time.
1965 double non_young_start_time_sec = young_end_time_sec;
1967 if (!gcs_are_young()) {
1968 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1969 cset_chooser->verify();
1970 const uint min_old_cset_length = calc_min_old_cset_length();
1971 const uint max_old_cset_length = calc_max_old_cset_length();
1973 uint expensive_region_num = 0;
1974 bool check_time_remaining = adaptive_young_list_length();
1976 HeapRegion* hr = cset_chooser->peek();
1977 while (hr != NULL) {
1978 if (old_cset_region_length() >= max_old_cset_length) {
1979 // Added maximum number of old regions to the CSet.
1980 ergo_verbose2(ErgoCSetConstruction,
1981 "finish adding old regions to CSet",
1982 ergo_format_reason("old CSet region num reached max")
1983 ergo_format_region("old")
1984 ergo_format_region("max"),
1985 old_cset_region_length(), max_old_cset_length);
1986 break;
1987 }
1990 // Stop adding regions if the remaining reclaimable space is
1991 // not above G1HeapWastePercent.
1992 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1993 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1994 double threshold = (double) G1HeapWastePercent;
1995 if (reclaimable_perc <= threshold) {
1996 // We've added enough old regions that the amount of uncollected
1997 // reclaimable space is at or below the waste threshold. Stop
1998 // adding old regions to the CSet.
1999 ergo_verbose5(ErgoCSetConstruction,
2000 "finish adding old regions to CSet",
2001 ergo_format_reason("reclaimable percentage not over threshold")
2002 ergo_format_region("old")
2003 ergo_format_region("max")
2004 ergo_format_byte_perc("reclaimable")
2005 ergo_format_perc("threshold"),
2006 old_cset_region_length(),
2007 max_old_cset_length,
2008 reclaimable_bytes,
2009 reclaimable_perc, threshold);
2010 break;
2011 }
2013 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
2014 if (check_time_remaining) {
2015 if (predicted_time_ms > time_remaining_ms) {
2016 // Too expensive for the current CSet.
2018 if (old_cset_region_length() >= min_old_cset_length) {
2019 // We have added the minimum number of old regions to the CSet,
2020 // we are done with this CSet.
2021 ergo_verbose4(ErgoCSetConstruction,
2022 "finish adding old regions to CSet",
2023 ergo_format_reason("predicted time is too high")
2024 ergo_format_ms("predicted time")
2025 ergo_format_ms("remaining time")
2026 ergo_format_region("old")
2027 ergo_format_region("min"),
2028 predicted_time_ms, time_remaining_ms,
2029 old_cset_region_length(), min_old_cset_length);
2030 break;
2031 }
2033 // We'll add it anyway given that we haven't reached the
2034 // minimum number of old regions.
2035 expensive_region_num += 1;
2036 }
2037 } else {
2038 if (old_cset_region_length() >= min_old_cset_length) {
2039 // In the non-auto-tuning case, we'll finish adding regions
2040 // to the CSet if we reach the minimum.
2041 ergo_verbose2(ErgoCSetConstruction,
2042 "finish adding old regions to CSet",
2043 ergo_format_reason("old CSet region num reached min")
2044 ergo_format_region("old")
2045 ergo_format_region("min"),
2046 old_cset_region_length(), min_old_cset_length);
2047 break;
2048 }
2049 }
2051 // We will add this region to the CSet.
2052 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2053 predicted_pause_time_ms += predicted_time_ms;
2054 cset_chooser->remove_and_move_to_next(hr);
2055 _g1->old_set_remove(hr);
2056 add_old_region_to_cset(hr);
2058 hr = cset_chooser->peek();
2059 }
2060 if (hr == NULL) {
2061 ergo_verbose0(ErgoCSetConstruction,
2062 "finish adding old regions to CSet",
2063 ergo_format_reason("candidate old regions not available"));
2064 }
2066 if (expensive_region_num > 0) {
2067 // We print the information once here at the end, predicated on
2068 // whether we added any apparently expensive regions or not, to
2069 // avoid generating output per region.
2070 ergo_verbose4(ErgoCSetConstruction,
2071 "added expensive regions to CSet",
2072 ergo_format_reason("old CSet region num not reached min")
2073 ergo_format_region("old")
2074 ergo_format_region("expensive")
2075 ergo_format_region("min")
2076 ergo_format_ms("remaining time"),
2077 old_cset_region_length(),
2078 expensive_region_num,
2079 min_old_cset_length,
2080 time_remaining_ms);
2081 }
2083 cset_chooser->verify();
2084 }
2086 stop_incremental_cset_building();
2088 ergo_verbose5(ErgoCSetConstruction,
2089 "finish choosing CSet",
2090 ergo_format_region("eden")
2091 ergo_format_region("survivors")
2092 ergo_format_region("old")
2093 ergo_format_ms("predicted pause time")
2094 ergo_format_ms("target pause time"),
2095 eden_region_length, survivor_region_length,
2096 old_cset_region_length(),
2097 predicted_pause_time_ms, target_pause_time_ms);
2099 double non_young_end_time_sec = os::elapsedTime();
2100 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2101 }
2103 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2104 if (TraceGen0Time) {
2105 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2106 }
2107 }
2109 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2110 if (TraceGen0Time) {
2111 _all_yield_times_ms.add(yield_time_ms);
2112 }
2113 }
2115 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2116 if (TraceGen0Time) {
2117 _total.add(pause_time_ms);
2118 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2119 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2120 _parallel.add(phase_times->cur_collection_par_time_ms());
2121 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2122 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2123 _update_rs.add(phase_times->average_last_update_rs_time());
2124 _scan_rs.add(phase_times->average_last_scan_rs_time());
2125 _obj_copy.add(phase_times->average_last_obj_copy_time());
2126 _termination.add(phase_times->average_last_termination_time());
2128 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2129 phase_times->average_last_satb_filtering_times_ms() +
2130 phase_times->average_last_update_rs_time() +
2131 phase_times->average_last_scan_rs_time() +
2132 phase_times->average_last_obj_copy_time() +
2133 phase_times->average_last_termination_time();
2135 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2136 _parallel_other.add(parallel_other_time);
2137 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2138 }
2139 }
2141 void TraceGen0TimeData::increment_young_collection_count() {
2142 if (TraceGen0Time) {
2143 ++_young_pause_num;
2144 }
2145 }
2147 void TraceGen0TimeData::increment_mixed_collection_count() {
2148 if (TraceGen0Time) {
2149 ++_mixed_pause_num;
2150 }
2151 }
2153 void TraceGen0TimeData::print_summary(const char* str,
2154 const NumberSeq* seq) const {
2155 double sum = seq->sum();
2156 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2157 str, sum / 1000.0, seq->avg());
2158 }
2160 void TraceGen0TimeData::print_summary_sd(const char* str,
2161 const NumberSeq* seq) const {
2162 print_summary(str, seq);
2163 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2164 "(num", seq->num(), seq->sd(), seq->maximum());
2165 }
2167 void TraceGen0TimeData::print() const {
2168 if (!TraceGen0Time) {
2169 return;
2170 }
2172 gclog_or_tty->print_cr("ALL PAUSES");
2173 print_summary_sd(" Total", &_total);
2174 gclog_or_tty->print_cr("");
2175 gclog_or_tty->print_cr("");
2176 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
2177 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
2178 gclog_or_tty->print_cr("");
2180 gclog_or_tty->print_cr("EVACUATION PAUSES");
2182 if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2183 gclog_or_tty->print_cr("none");
2184 } else {
2185 print_summary_sd(" Evacuation Pauses", &_total);
2186 print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
2187 print_summary(" Parallel Time", &_parallel);
2188 print_summary(" Ext Root Scanning", &_ext_root_scan);
2189 print_summary(" SATB Filtering", &_satb_filtering);
2190 print_summary(" Update RS", &_update_rs);
2191 print_summary(" Scan RS", &_scan_rs);
2192 print_summary(" Object Copy", &_obj_copy);
2193 print_summary(" Termination", &_termination);
2194 print_summary(" Parallel Other", &_parallel_other);
2195 print_summary(" Clear CT", &_clear_ct);
2196 print_summary(" Other", &_other);
2197 }
2198 gclog_or_tty->print_cr("");
2200 gclog_or_tty->print_cr("MISC");
2201 print_summary_sd(" Stop World", &_all_stop_world_times_ms);
2202 print_summary_sd(" Yields", &_all_yield_times_ms);
2203 }
2205 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2206 if (TraceGen1Time) {
2207 _all_full_gc_times.add(full_gc_time_ms);
2208 }
2209 }
2211 void TraceGen1TimeData::print() const {
2212 if (!TraceGen1Time) {
2213 return;
2214 }
2216 if (_all_full_gc_times.num() > 0) {
2217 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2218 _all_full_gc_times.num(),
2219 _all_full_gc_times.sum() / 1000.0);
2220 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2221 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2222 _all_full_gc_times.sd(),
2223 _all_full_gc_times.maximum());
2224 }
2225 }