Fri, 14 Oct 2011 11:12:24 -0400
7088680: G1: Cleanup in the G1CollectorPolicy class
Summary: Removed unused fields and methods, removed the G1CollectorPolicy_BestRegionsFirst class and folded its functionality into the G1CollectorPolicy class.
Reviewed-by: ysr, brutisso, jcoomes
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 #include "gc_implementation/g1/concurrentMark.hpp"
28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
34 #include "runtime/arguments.hpp"
35 #include "runtime/java.hpp"
36 #include "runtime/mutexLocker.hpp"
37 #include "utilities/debug.hpp"
39 #define PREDICTIONS_VERBOSE 0
41 // <NEW PREDICTION>
43 // Different defaults for different numbers of GC threads
44 // They were chosen by running GCOld and SPECjbb on debris with different
45 // numbers of GC threads and selecting the values based on the results
47 // all the same
48 static double rs_length_diff_defaults[] = {
49 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
50 };
52 static double cost_per_card_ms_defaults[] = {
53 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
54 };
56 // all the same
57 static double fully_young_cards_per_entry_ratio_defaults[] = {
58 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
59 };
61 static double cost_per_entry_ms_defaults[] = {
62 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
63 };
65 static double cost_per_byte_ms_defaults[] = {
66 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
67 };
69 // these should be pretty consistent
70 static double constant_other_time_ms_defaults[] = {
71 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
72 };
75 static double young_other_cost_per_region_ms_defaults[] = {
76 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
77 };
79 static double non_young_other_cost_per_region_ms_defaults[] = {
80 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
81 };
83 // </NEW PREDICTION>
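// As a worked illustration of how these tables are consulted (see the
// constructor below): the defaults are indexed by a clamped thread count,
// index 0 for ParallelGCThreads == 0, index 7 for more than 8 threads,
// and ParallelGCThreads - 1 otherwise. So with ParallelGCThreads = 4 the
// seed values are cost_per_card_ms = 0.003, cost_per_entry_ms = 0.008,
// and cost_per_byte_ms = 0.000015.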
85 // Helper class for avoiding interleaved logging
86 class LineBuffer: public StackObj {
88 private:
89 static const int BUFFER_LEN = 1024;
90 static const int INDENT_CHARS = 3;
91 char _buffer[BUFFER_LEN];
92 int _indent_level;
93 int _cur;
95 void vappend(const char* format, va_list ap) {
96 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
97 if (res != -1) {
98 _cur += res;
99 } else {
100 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
101 _buffer[BUFFER_LEN -1] = 0;
102 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
103 }
104 }
106 public:
107 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
108 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
109 _buffer[_cur] = ' ';
110 }
111 }
113 #ifndef PRODUCT
114 ~LineBuffer() {
115 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
116 }
117 #endif
119 void append(const char* format, ...) {
120 va_list ap;
121 va_start(ap, format);
122 vappend(format, ap);
123 va_end(ap);
124 }
126 void append_and_print_cr(const char* format, ...) {
127 va_list ap;
128 va_start(ap, format);
129 vappend(format, ap);
130 va_end(ap);
131 gclog_or_tty->print_cr("%s", _buffer);
132 _cur = _indent_level * INDENT_CHARS;
133 }
134 };
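// A minimal usage sketch for LineBuffer (values invented): the buffer
// accumulates a whole line and emits it with a single print_cr() call,
// so lines from concurrent threads cannot interleave mid-line:
//
//   LineBuffer buf(2);                       // indent by 2 * INDENT_CHARS
//   buf.append("[%s (ms):", "Object Copy");
//   buf.append(" %3.1lf", 12.3);
//   buf.append_and_print_cr("]");            // print the line, reset _cur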
136 G1CollectorPolicy::G1CollectorPolicy() :
137 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
138 ? ParallelGCThreads : 1),
140 _n_pauses(0),
141 _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
142 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
143 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
144 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
145 _all_pause_times_ms(new NumberSeq()),
146 _stop_world_start(0.0),
147 _all_stop_world_times_ms(new NumberSeq()),
148 _all_yield_times_ms(new NumberSeq()),
149 _using_new_ratio_calculations(false),
151 _all_mod_union_times_ms(new NumberSeq()),
153 _summary(new Summary()),
155 _cur_clear_ct_time_ms(0.0),
157 _cur_ref_proc_time_ms(0.0),
158 _cur_ref_enq_time_ms(0.0),
160 #ifndef PRODUCT
161 _min_clear_cc_time_ms(-1.0),
162 _max_clear_cc_time_ms(-1.0),
163 _cur_clear_cc_time_ms(0.0),
164 _cum_clear_cc_time_ms(0.0),
165 _num_cc_clears(0L),
166 #endif
168 _region_num_young(0),
169 _region_num_tenured(0),
170 _prev_region_num_young(0),
171 _prev_region_num_tenured(0),
173 _aux_num(10),
174 _all_aux_times_ms(new NumberSeq[_aux_num]),
175 _cur_aux_start_times_ms(new double[_aux_num]),
176 _cur_aux_times_ms(new double[_aux_num]),
177 _cur_aux_times_set(new bool[_aux_num]),
179 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
180 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
182 // <NEW PREDICTION>
184 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
185 _prev_collection_pause_end_ms(0.0),
186 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
187 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
188 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
189 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
190 _partially_young_cards_per_entry_ratio_seq(
191 new TruncatedSeq(TruncatedSeqLength)),
192 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
193 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
194 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
195 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
196 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
197 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
198 _non_young_other_cost_per_region_ms_seq(
199 new TruncatedSeq(TruncatedSeqLength)),
201 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
202 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
203 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
205 _pause_time_target_ms((double) MaxGCPauseMillis),
207 // </NEW PREDICTION>
209 _full_young_gcs(true),
210 _full_young_pause_num(0),
211 _partial_young_pause_num(0),
213 _during_marking(false),
214 _in_marking_window(false),
215 _in_marking_window_im(false),
217 _known_garbage_ratio(0.0),
218 _known_garbage_bytes(0),
220 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
222 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
224 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
225 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
227 _recent_avg_pause_time_ratio(0.0),
229 _all_full_gc_times_ms(new NumberSeq()),
231 // G1PausesBtwnConcMark defaults to -1
232 // so the hack is to do the cast QQQ FIXME
233 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
234 _initiate_conc_mark_if_possible(false),
235 _during_initial_mark_pause(false),
236 _should_revert_to_full_young_gcs(false),
237 _last_full_young_gc(false),
239 _eden_bytes_before_gc(0),
240 _survivor_bytes_before_gc(0),
241 _capacity_before_gc(0),
243 _prev_collection_pause_used_at_end_bytes(0),
245 _collection_set(NULL),
246 _collection_set_size(0),
247 _collection_set_bytes_used_before(0),
249 // Incremental CSet attributes
250 _inc_cset_build_state(Inactive),
251 _inc_cset_head(NULL),
252 _inc_cset_tail(NULL),
253 _inc_cset_size(0),
254 _inc_cset_young_index(0),
255 _inc_cset_bytes_used_before(0),
256 _inc_cset_max_finger(NULL),
257 _inc_cset_recorded_young_bytes(0),
258 _inc_cset_recorded_rs_lengths(0),
259 _inc_cset_predicted_elapsed_time_ms(0.0),
260 _inc_cset_predicted_bytes_to_copy(0),
262 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
263 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
264 #endif // _MSC_VER
266 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
267 G1YoungSurvRateNumRegionsSummary)),
268 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
269 G1YoungSurvRateNumRegionsSummary)),
270 // add here any more surv rate groups
271 _recorded_survivor_regions(0),
272 _recorded_survivor_head(NULL),
273 _recorded_survivor_tail(NULL),
274 _survivors_age_table(true),
276 _gc_overhead_perc(0.0) {
278 // Set up the region size and associated fields. Given that the
279 // policy is created before the heap, we have to set this up here,
280 // so it's done as soon as possible.
281 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
282 HeapRegionRemSet::setup_remset_size();
284 G1ErgoVerbose::initialize();
285 if (PrintAdaptiveSizePolicy) {
286 // Currently, we only use a single switch for all the heuristics.
287 G1ErgoVerbose::set_enabled(true);
288 // Given that we don't currently have a verboseness level
289 // parameter, we'll hardcode this to high. This can be easily
290 // changed in the future.
291 G1ErgoVerbose::set_level(ErgoHigh);
292 } else {
293 G1ErgoVerbose::set_enabled(false);
294 }
296 // Verify PLAB sizes
297 const size_t region_size = HeapRegion::GrainWords;
298 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
299 char buffer[128];
300 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
301 OldPLABSize > region_size ? "Old" : "Young", region_size);
302 vm_exit_during_initialization(buffer);
303 }
305 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
306 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
308 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
309 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
310 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
312 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
313 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
315 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
317 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
319 _par_last_termination_times_ms = new double[_parallel_gc_threads];
320 _par_last_termination_attempts = new double[_parallel_gc_threads];
321 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
322 _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
324 // start conservatively
325 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
327 // <NEW PREDICTION>
329 int index;
330 if (ParallelGCThreads == 0)
331 index = 0;
332 else if (ParallelGCThreads > 8)
333 index = 7;
334 else
335 index = ParallelGCThreads - 1;
337 _pending_card_diff_seq->add(0.0);
338 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
339 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
340 _fully_young_cards_per_entry_ratio_seq->add(
341 fully_young_cards_per_entry_ratio_defaults[index]);
342 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
343 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
344 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
345 _young_other_cost_per_region_ms_seq->add(
346 young_other_cost_per_region_ms_defaults[index]);
347 _non_young_other_cost_per_region_ms_seq->add(
348 non_young_other_cost_per_region_ms_defaults[index]);
350 // </NEW PREDICTION>
352 // Below, we might need to calculate the pause time target based on
353 // the pause interval. When we do so we are going to give G1 maximum
354 // flexibility and allow it to do pauses when it needs to. So, we'll
355 // arrange for the pause interval to be the pause time target + 1 to
356 // ensure that a) the pause time target is maximized with respect to
357 // the pause interval and b) we maintain the invariant that pause
358 // time target < pause interval. If the user does not want this
359 // maximum flexibility, they will have to set the pause interval
360 // explicitly.
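  // For example (illustrative values): -XX:MaxGCPauseMillis=50 with
  // GCPauseIntervalMillis left unset yields an interval of 51ms, so the
  // MMU tracker created below aims for at most 50ms of pause time in any
  // 51ms time slice. With neither flag set, the defaults below produce
  // the pair 200ms / 201ms.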
362 // First make sure that, if either parameter is set, its value is
363 // reasonable.
364 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
365 if (MaxGCPauseMillis < 1) {
366 vm_exit_during_initialization("MaxGCPauseMillis should be "
367 "greater than 0");
368 }
369 }
370 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
371 if (GCPauseIntervalMillis < 1) {
372 vm_exit_during_initialization("GCPauseIntervalMillis should be "
373 "greater than 0");
374 }
375 }
377 // Then, if the pause time target parameter was not set, set it to
378 // the default value.
379 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
380 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
381 // The default pause time target in G1 is 200ms
382 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
383 } else {
384 // We do not allow the pause interval to be set without the
385 // pause time target
386 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
387 "without setting MaxGCPauseMillis");
388 }
389 }
391 // Then, if the interval parameter was not set, set it according to
392 // the pause time target (this will also deal with the case when the
393 // pause time target is the default value).
394 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
395 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
396 }
398 // Finally, make sure that the two parameters are consistent.
399 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
400 char buffer[256];
401 jio_snprintf(buffer, 256,
402 "MaxGCPauseMillis (%u) should be less than "
403 "GCPauseIntervalMillis (%u)",
404 MaxGCPauseMillis, GCPauseIntervalMillis);
405 vm_exit_during_initialization(buffer);
406 }
408 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
409 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
410 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
411 _sigma = (double) G1ConfidencePercent / 100.0;
413 // start conservatively (around 50ms is about right)
414 _concurrent_mark_remark_times_ms->add(0.05);
415 _concurrent_mark_cleanup_times_ms->add(0.20);
416 _tenuring_threshold = MaxTenuringThreshold;
417 // _max_survivor_regions will be calculated by
418 // update_young_list_target_length() during initialization.
419 _max_survivor_regions = 0;
421 assert(GCTimeRatio > 0,
422 "we should have set it to a default value set_g1_gc_flags() "
423 "if a user set it to 0");
424 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
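  // For instance, GCTimeRatio == 9 (illustrative) gives
  // _gc_overhead_perc == 100.0 * (1.0 / 10.0) == 10.0, i.e., a target
  // of at most roughly 10% of total time spent in GC.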
426 uintx reserve_perc = G1ReservePercent;
427 // Put an artificial ceiling on this so that it's not set to a silly value.
428 if (reserve_perc > 50) {
429 reserve_perc = 50;
430 warning("G1ReservePercent is set to a value that is too large, "
431 "it's been updated to %u", reserve_perc);
432 }
433 _reserve_factor = (double) reserve_perc / 100.0;
434 // This will be set when the heap is expanded
435 // for the first time during initialization.
436 _reserve_regions = 0;
438 initialize_all();
439 _collectionSetChooser = new CollectionSetChooser();
440 }
442 // Increment "i", mod "len"
443 static void inc_mod(int& i, int len) {
444 i++; if (i == len) i = 0;
445 }
447 void G1CollectorPolicy::initialize_flags() {
448 set_min_alignment(HeapRegion::GrainBytes);
449 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
450 if (SurvivorRatio < 1) {
451 vm_exit_during_initialization("Invalid survivor ratio specified");
452 }
453 CollectorPolicy::initialize_flags();
454 }
456 // The easiest way to deal with the parsing of the NewSize /
457 // MaxNewSize / etc. parameters is to re-use the code in the
458 // TwoGenerationCollectorPolicy class. This is similar to what
459 // ParallelScavenge does with its GenerationSizer class (see
460 // ParallelScavengeHeap::initialize()). We might change this in the
461 // future, but it's a good start.
462 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
463 private:
464 size_t size_to_region_num(size_t byte_size) {
465 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
466 }
468 public:
469 G1YoungGenSizer() {
470 initialize_flags();
471 initialize_size_info();
472 }
473 size_t min_young_region_num() {
474 return size_to_region_num(_min_gen0_size);
475 }
476 size_t initial_young_region_num() {
477 return size_to_region_num(_initial_gen0_size);
478 }
479 size_t max_young_region_num() {
480 return size_to_region_num(_max_gen0_size);
481 }
482 };
484 void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
485 assert(number_of_heap_regions > 0, "Heap must be initialized");
486 size_t young_size = number_of_heap_regions / (NewRatio + 1);
487 _min_desired_young_length = young_size;
488 _max_desired_young_length = young_size;
489 }
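// A quick illustration of the calculation above (sizes invented): with
// -XX:NewRatio=2 and a 300-region heap, young_size = 300 / (2 + 1) = 100
// regions, and both the min and max desired young lengths are pinned to
// that value, i.e., NewRatio acts as a fixed young size for a given heap.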
491 void G1CollectorPolicy::init() {
492 // Set aside an initial future to_space.
493 _g1 = G1CollectedHeap::heap();
495 assert(Heap_lock->owned_by_self(), "Locking discipline.");
497 initialize_gc_policy_counters();
499 G1YoungGenSizer sizer;
500 size_t initial_region_num = sizer.initial_young_region_num();
501 _min_desired_young_length = sizer.min_young_region_num();
502 _max_desired_young_length = sizer.max_young_region_num();
504 if (FLAG_IS_CMDLINE(NewRatio)) {
505 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
506 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
507 } else {
508 // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
509 update_young_list_size_using_newratio(_g1->n_regions());
510 _using_new_ratio_calculations = true;
511 }
512 }
514 // GenCollectorPolicy guarantees that min <= initial <= max.
515 // Asserting here just to state that we rely on this property.
516 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
517 assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
518 assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");
520 set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
521 if (adaptive_young_list_length()) {
522 _young_list_fixed_length = 0;
523 } else {
524 _young_list_fixed_length = initial_region_num;
525 }
526 _free_regions_at_end_of_collection = _g1->free_regions();
527 update_young_list_target_length();
528 _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
530 // We may immediately start allocating regions and placing them on the
531 // collection set list. Initialize the per-collection set info.
532 start_incremental_cset_building();
533 }
535 // Create the jstat counters for the policy.
536 void G1CollectorPolicy::initialize_gc_policy_counters() {
537 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
538 }
540 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
541 double base_time_ms,
542 size_t base_free_regions,
543 double target_pause_time_ms) {
544 if (young_length >= base_free_regions) {
545 // end condition 1: not enough space for the young regions
546 return false;
547 }
549 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
550 size_t bytes_to_copy =
551 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
552 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
553 double young_other_time_ms = predict_young_other_time_ms(young_length);
554 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
555 if (pause_time_ms > target_pause_time_ms) {
556 // end condition 2: prediction is over the target pause time
557 return false;
558 }
560 size_t free_bytes =
561 (base_free_regions - young_length) * HeapRegion::GrainBytes;
562 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
563 // end condition 3: out-of-space (conservatively!)
564 return false;
565 }
567 // success!
568 return true;
569 }
571 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
572 // re-calculate the necessary reserve
573 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
574 // We use ceiling so that if reserve_regions_d is > 0.0 (but
575 // smaller than 1.0) we'll get 1.
576 _reserve_regions = (size_t) ceil(reserve_regions_d);
578 if (_using_new_ratio_calculations) {
579 // -XX:NewRatio was specified so we need to update the
580 // young gen length when the heap size has changed.
581 update_young_list_size_using_newratio(new_number_of_regions);
582 }
583 }
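// E.g. (illustrative): with G1ReservePercent = 10, _reserve_factor is
// 0.10, so a 1000-region heap reserves ceil(100.0) == 100 regions, while
// a tiny 5-region heap still reserves ceil(0.5) == 1 region rather than
// rounding down to zero.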
585 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
586 size_t base_min_length) {
587 size_t desired_min_length = 0;
588 if (adaptive_young_list_length()) {
589 if (_alloc_rate_ms_seq->num() > 3) {
590 double now_sec = os::elapsedTime();
591 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
592 double alloc_rate_ms = predict_alloc_rate_ms();
593 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
594 } else {
595 // otherwise we don't have enough info to make the prediction
596 }
597 }
598 desired_min_length += base_min_length;
599 // make sure we don't go below any user-defined minimum bound
600 return MAX2(_min_desired_young_length, desired_min_length);
601 }
603 size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
604 // Here, we might want to also take into account any additional
605 // constraints (i.e., user-defined minimum bound). Currently, we
606 // effectively don't set this bound.
607 return _max_desired_young_length;
608 }
610 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
611 if (rs_lengths == (size_t) -1) {
612 // if it's set to the default value (-1), we should predict it;
613 // otherwise, use the given value.
614 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
615 }
617 // Calculate the absolute and desired min bounds.
619 // This is how many young regions we already have (currently: the survivors).
620 size_t base_min_length = recorded_survivor_regions();
621 // This is the absolute minimum young length, which ensures that we
622 // can allocate one eden region in the worst-case.
623 size_t absolute_min_length = base_min_length + 1;
624 size_t desired_min_length =
625 calculate_young_list_desired_min_length(base_min_length);
626 if (desired_min_length < absolute_min_length) {
627 desired_min_length = absolute_min_length;
628 }
630 // Calculate the absolute and desired max bounds.
632 // We will try our best not to "eat" into the reserve.
633 size_t absolute_max_length = 0;
634 if (_free_regions_at_end_of_collection > _reserve_regions) {
635 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
636 }
637 size_t desired_max_length = calculate_young_list_desired_max_length();
638 if (desired_max_length > absolute_max_length) {
639 desired_max_length = absolute_max_length;
640 }
642 size_t young_list_target_length = 0;
643 if (adaptive_young_list_length()) {
644 if (full_young_gcs()) {
645 young_list_target_length =
646 calculate_young_list_target_length(rs_lengths,
647 base_min_length,
648 desired_min_length,
649 desired_max_length);
650 _rs_lengths_prediction = rs_lengths;
651 } else {
652 // Don't calculate anything and let the code below bound it to
653 // the desired_min_length, i.e., do the next GC as soon as
654 // possible to maximize how many old regions we can add to it.
655 }
656 } else {
657 if (full_young_gcs()) {
658 young_list_target_length = _young_list_fixed_length;
659 } else {
660 // A bit arbitrary: during partially-young GCs we allocate half
661 // the young regions to try to add old regions to the CSet.
662 young_list_target_length = _young_list_fixed_length / 2;
663 // We choose to accept that we might go under the desired min
664 // length given that we intentionally ask for a smaller young gen.
665 desired_min_length = absolute_min_length;
666 }
667 }
669 // Make sure we don't go over the desired max length, nor under the
670 // desired min length. In case they clash, desired_min_length wins
671 // which is why that test is second.
672 if (young_list_target_length > desired_max_length) {
673 young_list_target_length = desired_max_length;
674 }
675 if (young_list_target_length < desired_min_length) {
676 young_list_target_length = desired_min_length;
677 }
679 assert(young_list_target_length > recorded_survivor_regions(),
680 "we should be able to allocate at least one eden region");
681 assert(young_list_target_length >= absolute_min_length, "post-condition");
682 _young_list_target_length = young_list_target_length;
684 update_max_gc_locker_expansion();
685 }
687 size_t
688 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
689 size_t base_min_length,
690 size_t desired_min_length,
691 size_t desired_max_length) {
692 assert(adaptive_young_list_length(), "pre-condition");
693 assert(full_young_gcs(), "only call this for fully-young GCs");
695 // In case some edge-condition makes the desired max length too small...
696 if (desired_max_length <= desired_min_length) {
697 return desired_min_length;
698 }
700 // We'll adjust min_young_length and max_young_length not to include
701 // the already allocated young regions (i.e., so they reflect the
702 // min and max eden regions we'll allocate). The base_min_length
703 // will be reflected in the predictions by the
704 // survivor_regions_evac_time prediction.
705 assert(desired_min_length > base_min_length, "invariant");
706 size_t min_young_length = desired_min_length - base_min_length;
707 assert(desired_max_length > base_min_length, "invariant");
708 size_t max_young_length = desired_max_length - base_min_length;
710 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
711 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
712 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
713 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
714 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
715 double base_time_ms =
716 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
717 survivor_regions_evac_time;
718 size_t available_free_regions = _free_regions_at_end_of_collection;
719 size_t base_free_regions = 0;
720 if (available_free_regions > _reserve_regions) {
721 base_free_regions = available_free_regions - _reserve_regions;
722 }
724 // Here, we will make sure that the shortest young length that
725 // makes sense fits within the target pause time.
727 if (predict_will_fit(min_young_length, base_time_ms,
728 base_free_regions, target_pause_time_ms)) {
729 // The shortest young length will fit into the target pause time;
730 // we'll now check whether the absolute maximum number of young
731 // regions will fit in the target pause time. If not, we'll do
732 // a binary search between min_young_length and max_young_length.
733 if (predict_will_fit(max_young_length, base_time_ms,
734 base_free_regions, target_pause_time_ms)) {
735 // The maximum young length will fit into the target pause time.
736 // We are done so set min young length to the maximum length (as
737 // the result is assumed to be returned in min_young_length).
738 min_young_length = max_young_length;
739 } else {
740 // The maximum possible number of young regions will not fit within
741 // the target pause time so we'll search for the optimal
742 // length. The loop invariants are:
743 //
744 // min_young_length < max_young_length
745 // min_young_length is known to fit into the target pause time
746 // max_young_length is known not to fit into the target pause time
747 //
748 // Going into the loop we know the above hold as we've just
749 // checked them. Every time around the loop we check whether
750 // the middle value between min_young_length and
751 // max_young_length fits into the target pause time. If it
752 // does, it becomes the new min. If it doesn't, it becomes
753 // the new max. This way we maintain the loop invariants.
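      //
      // An illustrative trace (region counts invented): min = 10,
      // max = 26 gives diff = 8, so we try 18; if 18 fits, min = 18
      // and we try 22; if 22 does not fit, max = 22 and we try 20;
      // and so on until diff == 0, at which point min_young_length
      // holds the largest length known to fit the pause target.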
755 assert(min_young_length < max_young_length, "invariant");
756 size_t diff = (max_young_length - min_young_length) / 2;
757 while (diff > 0) {
758 size_t young_length = min_young_length + diff;
759 if (predict_will_fit(young_length, base_time_ms,
760 base_free_regions, target_pause_time_ms)) {
761 min_young_length = young_length;
762 } else {
763 max_young_length = young_length;
764 }
765 assert(min_young_length < max_young_length, "invariant");
766 diff = (max_young_length - min_young_length) / 2;
767 }
768 // The result is min_young_length which, according to the
769 // loop invariants, should fit within the target pause time.
771 // These are the post-conditions of the binary search above:
772 assert(min_young_length < max_young_length,
773 "otherwise we should have discovered that max_young_length "
774 "fits into the pause target and not done the binary search");
775 assert(predict_will_fit(min_young_length, base_time_ms,
776 base_free_regions, target_pause_time_ms),
777 "min_young_length, the result of the binary search, should "
778 "fit into the pause target");
779 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
780 base_free_regions, target_pause_time_ms),
781 "min_young_length, the result of the binary search, should be "
782 "optimal, so no larger length should fit into the pause target");
783 }
784 } else {
785 // Even the minimum length doesn't fit into the pause time
786 // target, return it as the result nevertheless.
787 }
788 return base_min_length + min_young_length;
789 }
791 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
792 double survivor_regions_evac_time = 0.0;
793 for (HeapRegion * r = _recorded_survivor_head;
794 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
795 r = r->get_next_young_region()) {
796 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
797 }
798 return survivor_regions_evac_time;
799 }
801 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
802 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
804 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
805 if (rs_lengths > _rs_lengths_prediction) {
806 // add 10% to avoid having to recalculate often
807 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
808 update_young_list_target_length(rs_lengths_prediction);
809 }
810 }
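// Worked example (lengths invented): if the sampled rs_lengths comes in
// at 2000 entries against a prediction of 1800, the target is recomputed
// using 2000 * 1100 / 1000 = 2200, so small further growth in the
// sampled value will not force another recalculation.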
814 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
815 bool is_tlab,
816 bool* gc_overhead_limit_was_exceeded) {
817 guarantee(false, "Not using this policy feature yet.");
818 return NULL;
819 }
821 // This method controls how a collector handles one or more
822 // of its generations being fully allocated.
823 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
824 bool is_tlab) {
825 guarantee(false, "Not using this policy feature yet.");
826 return NULL;
827 }
830 #ifndef PRODUCT
831 bool G1CollectorPolicy::verify_young_ages() {
832 HeapRegion* head = _g1->young_list()->first_region();
833 return
834 verify_young_ages(head, _short_lived_surv_rate_group);
835 // also call verify_young_ages on any additional surv rate groups
836 }
838 bool
839 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
840 SurvRateGroup *surv_rate_group) {
841 guarantee( surv_rate_group != NULL, "pre-condition" );
843 const char* name = surv_rate_group->name();
844 bool ret = true;
845 int prev_age = -1;
847 for (HeapRegion* curr = head;
848 curr != NULL;
849 curr = curr->get_next_young_region()) {
850 SurvRateGroup* group = curr->surv_rate_group();
851 if (group == NULL && !curr->is_survivor()) {
852 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
853 ret = false;
854 }
856 if (surv_rate_group == group) {
857 int age = curr->age_in_surv_rate_group();
859 if (age < 0) {
860 gclog_or_tty->print_cr("## %s: encountered negative age", name);
861 ret = false;
862 }
864 if (age <= prev_age) {
865 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
866 "(%d, %d)", name, age, prev_age);
867 ret = false;
868 }
869 prev_age = age;
870 }
871 }
873 return ret;
874 }
875 #endif // PRODUCT
877 void G1CollectorPolicy::record_full_collection_start() {
878 _cur_collection_start_sec = os::elapsedTime();
879 // Release the future to-space so that it is available for compaction into.
880 _g1->set_full_collection();
881 }
883 void G1CollectorPolicy::record_full_collection_end() {
884 // Consider this like a collection pause for the purposes of allocation
885 // since last pause.
886 double end_sec = os::elapsedTime();
887 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
888 double full_gc_time_ms = full_gc_time_sec * 1000.0;
890 _all_full_gc_times_ms->add(full_gc_time_ms);
892 update_recent_gc_times(end_sec, full_gc_time_ms);
894 _g1->clear_full_collection();
896 // "Nuke" the heuristics that control the fully/partially young GC
897 // transitions and make sure we start with fully young GCs after the
898 // Full GC.
899 set_full_young_gcs(true);
900 _last_full_young_gc = false;
901 _should_revert_to_full_young_gcs = false;
902 clear_initiate_conc_mark_if_possible();
903 clear_during_initial_mark_pause();
904 _known_garbage_bytes = 0;
905 _known_garbage_ratio = 0.0;
906 _in_marking_window = false;
907 _in_marking_window_im = false;
909 _short_lived_surv_rate_group->start_adding_regions();
910 // also call this on any additional surv rate groups
912 record_survivor_regions(0, NULL, NULL);
914 _prev_region_num_young = _region_num_young;
915 _prev_region_num_tenured = _region_num_tenured;
917 _free_regions_at_end_of_collection = _g1->free_regions();
918 // Reset survivors SurvRateGroup.
919 _survivor_surv_rate_group->reset();
920 update_young_list_target_length();
921 _collectionSetChooser->updateAfterFullCollection();
922 }
924 void G1CollectorPolicy::record_stop_world_start() {
925 _stop_world_start = os::elapsedTime();
926 }
928 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
929 size_t start_used) {
930 if (PrintGCDetails) {
931 gclog_or_tty->stamp(PrintGCTimeStamps);
932 gclog_or_tty->print("[GC pause");
933 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
934 }
936 // We only need to do this here as the policy will only be applied
937 // to the GC we're about to start, so there is no point in calculating this
938 // every time we calculate / recalculate the target young length.
939 update_survivors_policy();
941 assert(_g1->used() == _g1->recalculate_used(),
942 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
943 _g1->used(), _g1->recalculate_used()));
945 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
946 _all_stop_world_times_ms->add(s_w_t_ms);
947 _stop_world_start = 0.0;
949 _cur_collection_start_sec = start_time_sec;
950 _cur_collection_pause_used_at_start_bytes = start_used;
951 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
952 _pending_cards = _g1->pending_card_num();
953 _max_pending_cards = _g1->max_pending_card_num();
955 _bytes_in_collection_set_before_gc = 0;
956 _bytes_copied_during_gc = 0;
958 YoungList* young_list = _g1->young_list();
959 _eden_bytes_before_gc = young_list->eden_used_bytes();
960 _survivor_bytes_before_gc = young_list->survivor_used_bytes();
961 _capacity_before_gc = _g1->capacity();
963 #ifdef DEBUG
964 // initialise these to something well known so that we can spot
965 // if they are not set properly
967 for (int i = 0; i < _parallel_gc_threads; ++i) {
968 _par_last_gc_worker_start_times_ms[i] = -1234.0;
969 _par_last_ext_root_scan_times_ms[i] = -1234.0;
970 _par_last_mark_stack_scan_times_ms[i] = -1234.0;
971 _par_last_update_rs_times_ms[i] = -1234.0;
972 _par_last_update_rs_processed_buffers[i] = -1234.0;
973 _par_last_scan_rs_times_ms[i] = -1234.0;
974 _par_last_obj_copy_times_ms[i] = -1234.0;
975 _par_last_termination_times_ms[i] = -1234.0;
976 _par_last_termination_attempts[i] = -1234.0;
977 _par_last_gc_worker_end_times_ms[i] = -1234.0;
978 _par_last_gc_worker_times_ms[i] = -1234.0;
979 }
980 #endif
982 for (int i = 0; i < _aux_num; ++i) {
983 _cur_aux_times_ms[i] = 0.0;
984 _cur_aux_times_set[i] = false;
985 }
987 _satb_drain_time_set = false;
988 _last_satb_drain_processed_buffers = -1;
990 _last_young_gc_full = false;
992 // do that for any other surv rate groups
993 _short_lived_surv_rate_group->stop_adding_regions();
994 _survivors_age_table.clear();
996 assert( verify_young_ages(), "region age verification" );
997 }
999 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
1000 _mark_closure_time_ms = mark_closure_time_ms;
1001 }
1003 void G1CollectorPolicy::record_concurrent_mark_init_end(double
1004 mark_init_elapsed_time_ms) {
1005 _during_marking = true;
1006 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
1007 clear_during_initial_mark_pause();
1008 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
1009 }
1011 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
1012 _mark_remark_start_sec = os::elapsedTime();
1013 _during_marking = false;
1014 }
1016 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
1017 double end_time_sec = os::elapsedTime();
1018 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
1019 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
1020 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1021 _prev_collection_pause_end_ms += elapsed_time_ms;
1023 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
1024 }
1026 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
1027 _mark_cleanup_start_sec = os::elapsedTime();
1028 }
1030 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
1031 _should_revert_to_full_young_gcs = false;
1032 _last_full_young_gc = true;
1033 _in_marking_window = false;
1034 }
1036 void G1CollectorPolicy::record_concurrent_pause() {
1037 if (_stop_world_start > 0.0) {
1038 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1039 _all_yield_times_ms->add(yield_ms);
1040 }
1041 }
1043 void G1CollectorPolicy::record_concurrent_pause_end() {
1044 }
1046 template<class T>
1047 T sum_of(T* sum_arr, int start, int n, int N) {
1048 T sum = (T)0;
1049 for (int i = 0; i < n; i++) {
1050 int j = (start + i) % N;
1051 sum += sum_arr[j];
1052 }
1053 return sum;
1054 }
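// A small sketch of how sum_of walks its circular buffer (indices
// invented): with N == 8, start == 6 and n == 4 it visits slots
// 6, 7, 0, 1 -- i.e., the n entries beginning at 'start', wrapping
// modulo the array length N.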
1056 void G1CollectorPolicy::print_par_stats(int level,
1057 const char* str,
1058 double* data) {
1059 double min = data[0], max = data[0];
1060 double total = 0.0;
1061 LineBuffer buf(level);
1062 buf.append("[%s (ms):", str);
1063 for (uint i = 0; i < ParallelGCThreads; ++i) {
1064 double val = data[i];
1065 if (val < min)
1066 min = val;
1067 if (val > max)
1068 max = val;
1069 total += val;
1070 buf.append(" %3.1lf", val);
1071 }
1072 buf.append_and_print_cr("");
1073 double avg = total / (double) ParallelGCThreads;
1074 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
1075 avg, min, max, max - min);
1076 }
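// With ParallelGCThreads == 4 the output of print_par_stats() looks
// roughly like this (values invented):
//
//   [Object Copy (ms): 10.1 9.8 10.4 10.0
//    Avg:  10.1, Min:   9.8, Max:  10.4, Diff:   0.6]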
1078 void G1CollectorPolicy::print_par_sizes(int level,
1079 const char* str,
1080 double* data) {
1081 double min = data[0], max = data[0];
1082 double total = 0.0;
1083 LineBuffer buf(level);
1084 buf.append("[%s :", str);
1085 for (uint i = 0; i < ParallelGCThreads; ++i) {
1086 double val = data[i];
1087 if (val < min)
1088 min = val;
1089 if (val > max)
1090 max = val;
1091 total += val;
1092 buf.append(" %d", (int) val);
1093 }
1094 buf.append_and_print_cr("");
1095 double avg = total / (double) ParallelGCThreads;
1096 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
1097 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
1098 }
1100 void G1CollectorPolicy::print_stats (int level,
1101 const char* str,
1102 double value) {
1103 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
1104 }
1106 void G1CollectorPolicy::print_stats (int level,
1107 const char* str,
1108 int value) {
1109 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
1110 }
1112 double G1CollectorPolicy::avg_value (double* data) {
1113 if (G1CollectedHeap::use_parallel_gc_threads()) {
1114 double ret = 0.0;
1115 for (uint i = 0; i < ParallelGCThreads; ++i)
1116 ret += data[i];
1117 return ret / (double) ParallelGCThreads;
1118 } else {
1119 return data[0];
1120 }
1121 }
1123 double G1CollectorPolicy::max_value (double* data) {
1124 if (G1CollectedHeap::use_parallel_gc_threads()) {
1125 double ret = data[0];
1126 for (uint i = 1; i < ParallelGCThreads; ++i)
1127 if (data[i] > ret)
1128 ret = data[i];
1129 return ret;
1130 } else {
1131 return data[0];
1132 }
1133 }
1135 double G1CollectorPolicy::sum_of_values (double* data) {
1136 if (G1CollectedHeap::use_parallel_gc_threads()) {
1137 double sum = 0.0;
1138 for (uint i = 0; i < ParallelGCThreads; i++)
1139 sum += data[i];
1140 return sum;
1141 } else {
1142 return data[0];
1143 }
1144 }
1146 double G1CollectorPolicy::max_sum (double* data1,
1147 double* data2) {
1148 double ret = data1[0] + data2[0];
1150 if (G1CollectedHeap::use_parallel_gc_threads()) {
1151 for (uint i = 1; i < ParallelGCThreads; ++i) {
1152 double data = data1[i] + data2[i];
1153 if (data > ret)
1154 ret = data;
1155 }
1156 }
1157 return ret;
1158 }
1160 // Anything below that is considered to be zero
1161 #define MIN_TIMER_GRANULARITY 0.0000001
1163 void G1CollectorPolicy::record_collection_pause_end() {
1164 double end_time_sec = os::elapsedTime();
1165 double elapsed_ms = _last_pause_time_ms;
1166 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1167 size_t rs_size =
1168 _cur_collection_pause_used_regions_at_start - collection_set_size();
1169 size_t cur_used_bytes = _g1->used();
1170 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1171 bool last_pause_included_initial_mark = false;
1172 bool update_stats = !_g1->evacuation_failed();
1174 #ifndef PRODUCT
1175 if (G1YoungSurvRateVerbose) {
1176 gclog_or_tty->print_cr("");
1177 _short_lived_surv_rate_group->print();
1178 // do that for any other surv rate groups too
1179 }
1180 #endif // PRODUCT
1182 last_pause_included_initial_mark = during_initial_mark_pause();
1183 if (last_pause_included_initial_mark)
1184 record_concurrent_mark_init_end(0.0);
1186 size_t marking_initiating_used_threshold =
1187 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
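  // E.g. (illustrative): with a 1024M heap and
  // InitiatingHeapOccupancyPercent == 45, a concurrent cycle is
  // requested once used bytes exceed (1024M / 100) * 45, i.e.,
  // roughly 460M.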
1189 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1190 assert(!last_pause_included_initial_mark, "invariant");
1191 if (cur_used_bytes > marking_initiating_used_threshold) {
1192 if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1193 assert(!during_initial_mark_pause(), "we should not see this here");
1195 ergo_verbose3(ErgoConcCycles,
1196 "request concurrent cycle initiation",
1197 ergo_format_reason("occupancy higher than threshold")
1198 ergo_format_byte("occupancy")
1199 ergo_format_byte_perc("threshold"),
1200 cur_used_bytes,
1201 marking_initiating_used_threshold,
1202 (double) InitiatingHeapOccupancyPercent);
1204 // Note: this might have already been set, if during the last
1205 // pause we decided to start a cycle but at the beginning of
1206 // this pause we decided to postpone it. That's OK.
1207 set_initiate_conc_mark_if_possible();
1208 } else {
1209 ergo_verbose2(ErgoConcCycles,
1210 "do not request concurrent cycle initiation",
1211 ergo_format_reason("occupancy lower than previous occupancy")
1212 ergo_format_byte("occupancy")
1213 ergo_format_byte("previous occupancy"),
1214 cur_used_bytes,
1215 _prev_collection_pause_used_at_end_bytes);
1216 }
1217 }
1218 }
1220 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
1222 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1223 end_time_sec, false);
1225 guarantee(_cur_collection_pause_used_regions_at_start >=
1226 collection_set_size(),
1227 "Negative RS size?");
1229 // This assert is exempted when we're doing parallel collection pauses,
1230 // because the fragmentation caused by the parallel GC allocation buffers
1231 // can lead to more memory being used during collection than was used
1232 // before. Best leave this out until the fragmentation problem is fixed.
1233 // Pauses in which evacuation failed can also lead to negative
1234 // collections, since no space is reclaimed from a region containing an
1235 // object whose evacuation failed.
1236 // Further, we're now always doing parallel collection. But I'm still
1237 // leaving this here as a placeholder for a more precise assertion later.
1238 // (DLD, 10/05.)
1239 assert((true || parallel) // Always using GC LABs now.
1240 || _g1->evacuation_failed()
1241 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
1242 "Negative collection");
1244 size_t freed_bytes =
1245 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
1246 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
1248 double survival_fraction =
1249 (double)surviving_bytes/
1250 (double)_collection_set_bytes_used_before;
1252 _n_pauses++;
1254 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
1255 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
1256 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
1257 double update_rs_processed_buffers =
1258 sum_of_values(_par_last_update_rs_processed_buffers);
1259 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
1260 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
1261 double termination_time = avg_value(_par_last_termination_times_ms);
1263 double parallel_known_time = update_rs_time +
1264 ext_root_scan_time +
1265 mark_stack_scan_time +
1266 scan_rs_time +
1267 obj_copy_time +
1268 termination_time;
1270 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
1272 PauseSummary* summary = _summary;
1274 if (update_stats) {
1275 _recent_rs_scan_times_ms->add(scan_rs_time);
1276 _recent_pause_times_ms->add(elapsed_ms);
1277 _recent_rs_sizes->add(rs_size);
1279 MainBodySummary* body_summary = summary->main_body_summary();
1280 guarantee(body_summary != NULL, "should not be null!");
1282 if (_satb_drain_time_set)
1283 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
1284 else
1285 body_summary->record_satb_drain_time_ms(0.0);
1287 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
1288 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
1289 body_summary->record_update_rs_time_ms(update_rs_time);
1290 body_summary->record_scan_rs_time_ms(scan_rs_time);
1291 body_summary->record_obj_copy_time_ms(obj_copy_time);
1292 if (parallel) {
1293 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
1294 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
1295 body_summary->record_termination_time_ms(termination_time);
1296 body_summary->record_parallel_other_time_ms(parallel_other_time);
1297 }
1298 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
1300 // We exempt parallel collection from this check because Alloc Buffer
1301 // fragmentation can produce negative collections. Same with evac
1302 // failure.
1303 // Further, we're now always doing parallel collection. But I'm still
1304 // leaving this here as a placeholder for a more precise assertion later.
1305 // (DLD, 10/05.)
1306 assert((true || parallel)
1307 || _g1->evacuation_failed()
1308 || surviving_bytes <= _collection_set_bytes_used_before,
1309 "Or else negative collection!");
1310 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
1311 _recent_CS_bytes_surviving->add(surviving_bytes);
1313 // this is where we update the allocation rate of the application
1314 double app_time_ms =
1315 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
1316 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1317 // This usually happens due to the timer not having the required
1318 // granularity. Some Linuxes are the usual culprits.
1319 // We'll just set it to something (arbitrarily) small.
1320 app_time_ms = 1.0;
1321 }
1322 size_t regions_allocated =
1323 (_region_num_young - _prev_region_num_young) +
1324 (_region_num_tenured - _prev_region_num_tenured);
1325 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1326 _alloc_rate_ms_seq->add(alloc_rate_ms);
1327 _prev_region_num_young = _region_num_young;
1328 _prev_region_num_tenured = _region_num_tenured;
1330 double interval_ms =
1331 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1332 update_recent_gc_times(end_time_sec, elapsed_ms);
1333 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
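    // Illustrative numbers: if the tracked recent pauses sum to 120ms
    // over a 2400ms interval, the ratio is 0.05, i.e., roughly 5% of
    // recent wall-clock time was spent in GC pauses.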
1334 if (recent_avg_pause_time_ratio() < 0.0 ||
1335 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1336 #ifndef PRODUCT
1337 // Dump info to allow post-facto debugging
1338 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
1339 gclog_or_tty->print_cr("-------------------------------------------");
1340 gclog_or_tty->print_cr("Recent GC Times (ms):");
1341 _recent_gc_times_ms->dump();
1342 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1343 _recent_prev_end_times_for_all_gcs_sec->dump();
1344 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1345 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1346 // In debug mode, terminate the JVM if the user wants to debug at this point.
1347 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1348 #endif // !PRODUCT
1349 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1350 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1351 if (_recent_avg_pause_time_ratio < 0.0) {
1352 _recent_avg_pause_time_ratio = 0.0;
1353 } else {
1354 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1355 _recent_avg_pause_time_ratio = 1.0;
1356 }
1357 }
1358 }
1360 if (G1PolicyVerbose > 1) {
1361 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
1362 }
1364 if (G1PolicyVerbose > 1) {
1365 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
1366 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
1367 " |RS|: " SIZE_FORMAT,
1368 elapsed_ms, recent_avg_time_for_pauses_ms(),
1369 scan_rs_time, recent_avg_time_for_rs_scan_ms(),
1370 rs_size);
1372 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
1373 " At end " SIZE_FORMAT "K\n"
1374 " garbage : " SIZE_FORMAT "K"
1375 " of " SIZE_FORMAT "K\n"
1376 " survival : %6.2f%% (%6.2f%% avg)",
1377 _cur_collection_pause_used_at_start_bytes/K,
1378 _g1->used()/K, freed_bytes/K,
1379 _collection_set_bytes_used_before/K,
1380 survival_fraction*100.0,
1381 recent_avg_survival_fraction()*100.0);
1382 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
1383 recent_avg_pause_time_ratio() * 100.0);
1384 }
1386 double other_time_ms = elapsed_ms;
1388 if (_satb_drain_time_set) {
1389 other_time_ms -= _cur_satb_drain_time_ms;
1390 }
1392 if (parallel) {
1393 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
1394 } else {
1395 other_time_ms -=
1396 update_rs_time +
1397 ext_root_scan_time + mark_stack_scan_time +
1398 scan_rs_time + obj_copy_time;
1399 }
1401 if (PrintGCDetails) {
1402 gclog_or_tty->print_cr("%s, %1.8lf secs]",
1403 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
1404 elapsed_ms / 1000.0);
1406 if (_satb_drain_time_set) {
1407 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
1408 }
1409 if (_last_satb_drain_processed_buffers >= 0) {
1410 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
1411 }
1412 if (parallel) {
1413 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
1414 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
1415 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
1416 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
1417 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
1418 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
1419 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
1420 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
1421 print_par_stats(2, "Termination", _par_last_termination_times_ms);
1422 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
1423 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
1425 for (int i = 0; i < _parallel_gc_threads; i++) {
1426 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
1427 }
1428 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
1430 print_stats(2, "Parallel Other", parallel_other_time);
1431 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
1432 } else {
1433 print_stats(1, "Update RS", update_rs_time);
1434 print_stats(2, "Processed Buffers",
1435 (int)update_rs_processed_buffers);
1436 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
1437 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
1438 print_stats(1, "Scan RS", scan_rs_time);
1439 print_stats(1, "Object Copying", obj_copy_time);
1440 }
1441 #ifndef PRODUCT
1442 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
1443 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
1444 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
1445 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
1446 if (_num_cc_clears > 0) {
1447 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
1448 }
1449 #endif
1450 print_stats(1, "Other", other_time_ms);
1451 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
1452 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
1453 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
1455 for (int i = 0; i < _aux_num; ++i) {
1456 if (_cur_aux_times_set[i]) {
1457 char buffer[96];
1458 sprintf(buffer, "Aux%d", i);
1459 print_stats(1, buffer, _cur_aux_times_ms[i]);
1460 }
1461 }
1462 }
1464 _all_pause_times_ms->add(elapsed_ms);
1465 if (update_stats) {
1466 summary->record_total_time_ms(elapsed_ms);
1467 summary->record_other_time_ms(other_time_ms);
1468 }
1469 for (int i = 0; i < _aux_num; ++i)
1470 if (_cur_aux_times_set[i]) {
1471 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
1472 }
1474 // Update the efficiency-since-mark vars.
1475 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1476 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1477 // This usually happens due to the timer not having the required
1478 // granularity. Some Linuxes are the usual culprits.
1479 // We'll just set it to something (arbitrarily) small.
1480 proc_ms = 1.0;
1481 }
1482 double cur_efficiency = (double) freed_bytes / proc_ms;
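  // Illustrative numbers: freeing 64M of garbage in a 40ms pause with
  // 4 parallel GC threads gives proc_ms = 160.0 and an efficiency of
  // roughly 410K bytes reclaimed per worker-ms; this feeds the
  // partially-young vs. fully-young decision below.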
1484 bool new_in_marking_window = _in_marking_window;
1485 bool new_in_marking_window_im = false;
1486 if (during_initial_mark_pause()) {
1487 new_in_marking_window = true;
1488 new_in_marking_window_im = true;
1489 }
1491 if (_last_full_young_gc) {
1492 if (!last_pause_included_initial_mark) {
1493 ergo_verbose2(ErgoPartiallyYoungGCs,
1494 "start partially-young GCs",
1495 ergo_format_byte_perc("known garbage"),
1496 _known_garbage_bytes, _known_garbage_ratio * 100.0);
1497 set_full_young_gcs(false);
1498 } else {
1499 ergo_verbose0(ErgoPartiallyYoungGCs,
1500 "do not start partially-young GCs",
1501 ergo_format_reason("concurrent cycle is about to start"));
1502 }
1503 _last_full_young_gc = false;
1504 }
1506 if ( !_last_young_gc_full ) {
1507 if (_should_revert_to_full_young_gcs) {
1508 ergo_verbose2(ErgoPartiallyYoungGCs,
1509 "end partially-young GCs",
1510 ergo_format_reason("partially-young GCs end requested")
1511 ergo_format_byte_perc("known garbage"),
1512 _known_garbage_bytes, _known_garbage_ratio * 100.0);
1513 set_full_young_gcs(true);
1514 } else if (_known_garbage_ratio < 0.05) {
1515 ergo_verbose3(ErgoPartiallyYoungGCs,
1516 "end partially-young GCs",
1517 ergo_format_reason("known garbage percent lower than threshold")
1518 ergo_format_byte_perc("known garbage")
1519 ergo_format_perc("threshold"),
1520 _known_garbage_bytes, _known_garbage_ratio * 100.0,
1521 0.05 * 100.0);
1522 set_full_young_gcs(true);
1523 } else if (adaptive_young_list_length() &&
1524 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
1525 ergo_verbose5(ErgoPartiallyYoungGCs,
1526 "end partially-young GCs",
1527 ergo_format_reason("current GC efficiency lower than "
1528 "predicted fully-young GC efficiency")
1529 ergo_format_double("GC efficiency factor")
1530 ergo_format_double("current GC efficiency")
1531 ergo_format_double("predicted fully-young GC efficiency")
1532 ergo_format_byte_perc("known garbage"),
1533 get_gc_eff_factor(), cur_efficiency,
1534 predict_young_gc_eff(),
1535 _known_garbage_bytes, _known_garbage_ratio * 100.0);
1536 set_full_young_gcs(true);
1537 }
1538 }
1539 _should_revert_to_full_young_gcs = false;
1541 if (_last_young_gc_full && !_during_marking) {
1542 _young_gc_eff_seq->add(cur_efficiency);
1543 }
1545 _short_lived_surv_rate_group->start_adding_regions();
1546 // do that for any other surv rate groups
1548 // <NEW PREDICTION>
1550 if (update_stats) {
1551 double pause_time_ms = elapsed_ms;
1553 size_t diff = 0;
1554 if (_max_pending_cards >= _pending_cards)
1555 diff = _max_pending_cards - _pending_cards;
1556 _pending_card_diff_seq->add((double) diff);
1558 double cost_per_card_ms = 0.0;
1559 if (_pending_cards > 0) {
1560 cost_per_card_ms = update_rs_time / (double) _pending_cards;
1561 _cost_per_card_ms_seq->add(cost_per_card_ms);
1562 }
1564 size_t cards_scanned = _g1->cards_scanned();
1566 double cost_per_entry_ms = 0.0;
1567 if (cards_scanned > 10) {
1568 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1569 if (_last_young_gc_full)
1570 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1571 else
1572 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1573 }
1575 if (_max_rs_lengths > 0) {
1576 double cards_per_entry_ratio =
1577 (double) cards_scanned / (double) _max_rs_lengths;
1578 if (_last_young_gc_full)
1579 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1580 else
1581 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1582 }
1584 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1585 if (_max_rs_lengths >= _recorded_rs_lengths) // unsigned: test the operands, not the diff
1586 _rs_length_diff_seq->add((double) rs_length_diff);
1588 size_t copied_bytes = surviving_bytes;
1589 double cost_per_byte_ms = 0.0;
1590 if (copied_bytes > 0) {
1591 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1592 if (_in_marking_window)
1593 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1594 else
1595 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1596 }
1598 double all_other_time_ms = pause_time_ms -
1599 (update_rs_time + scan_rs_time + obj_copy_time +
1600 _mark_closure_time_ms + termination_time);
1602 double young_other_time_ms = 0.0;
1603 if (_recorded_young_regions > 0) {
1604 young_other_time_ms =
1605 _recorded_young_cset_choice_time_ms +
1606 _recorded_young_free_cset_time_ms;
1607 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1608 (double) _recorded_young_regions);
1609 }
1610 double non_young_other_time_ms = 0.0;
1611 if (_recorded_non_young_regions > 0) {
1612 non_young_other_time_ms =
1613 _recorded_non_young_cset_choice_time_ms +
1614 _recorded_non_young_free_cset_time_ms;
1616 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1617 (double) _recorded_non_young_regions);
1618 }
1620 double constant_other_time_ms = all_other_time_ms -
1621 (young_other_time_ms + non_young_other_time_ms);
1622 _constant_other_time_ms_seq->add(constant_other_time_ms);
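// Worked example (hypothetical numbers): a 100.0 ms pause with
// update_rs 20.0, scan_rs 30.0, obj_copy 35.0, mark closure 2.0 and
// termination 3.0 leaves all_other_time_ms = 100.0 - 90.0 = 10.0.
// If the young cset choice/free times sum to 4.0 and the non-young
// ones to 2.0, constant_other_time_ms = 10.0 - (4.0 + 2.0) = 4.0.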
1624 double survival_ratio = 0.0;
1625 if (_bytes_in_collection_set_before_gc > 0) {
1626 survival_ratio = (double) _bytes_copied_during_gc /
1627 (double) _bytes_in_collection_set_before_gc;
1628 }
1630 _pending_cards_seq->add((double) _pending_cards);
1631 _scanned_cards_seq->add((double) cards_scanned);
1632 _rs_lengths_seq->add((double) _max_rs_lengths);
1634 double expensive_region_limit_ms =
1635 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
1636 if (expensive_region_limit_ms < 0.0) {
1637 // this means that the other time was predicted to be longer
1638 // than the max pause time
1639 expensive_region_limit_ms = (double) MaxGCPauseMillis;
1640 }
1641 _expensive_region_limit_ms = expensive_region_limit_ms;
1643 if (PREDICTIONS_VERBOSE) {
1644 gclog_or_tty->print_cr("");
1645 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
1646 "REGIONS %d %d %d "
1647 "PENDING_CARDS %d %d "
1648 "CARDS_SCANNED %d %d "
1649 "RS_LENGTHS %d %d "
1650 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
1651 "SURVIVAL_RATIO %1.6lf %1.6lf "
1652 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
1653 "OTHER_YOUNG %1.6lf %1.6lf "
1654 "OTHER_NON_YOUNG %1.6lf %1.6lf "
1655 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
1656 "ELAPSED %1.6lf %1.6lf ",
1657 _cur_collection_start_sec,
1658 (!_last_young_gc_full) ? 2 :
1659 (last_pause_included_initial_mark) ? 1 : 0,
1660 _recorded_region_num,
1661 _recorded_young_regions,
1662 _recorded_non_young_regions,
1663 _predicted_pending_cards, _pending_cards,
1664 _predicted_cards_scanned, cards_scanned,
1665 _predicted_rs_lengths, _max_rs_lengths,
1666 _predicted_rs_update_time_ms, update_rs_time,
1667 _predicted_rs_scan_time_ms, scan_rs_time,
1668 _predicted_survival_ratio, survival_ratio,
1669 _predicted_object_copy_time_ms, obj_copy_time,
1670 _predicted_constant_other_time_ms, constant_other_time_ms,
1671 _predicted_young_other_time_ms, young_other_time_ms,
1672 _predicted_non_young_other_time_ms,
1673 non_young_other_time_ms,
1674 _vtime_diff_ms, termination_time,
1675 _predicted_pause_time_ms, elapsed_ms);
1676 }
1678 if (G1PolicyVerbose > 0) {
1679 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
1680 _predicted_pause_time_ms,
1681 (_within_target) ? "within" : "outside",
1682 elapsed_ms);
1683 }
1685 }
1687 _in_marking_window = new_in_marking_window;
1688 _in_marking_window_im = new_in_marking_window_im;
1689 _free_regions_at_end_of_collection = _g1->free_regions();
1690 update_young_list_target_length();
1692 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1693 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1694 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
1695 // </NEW PREDICTION>
1697 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
1698 }
1700 #define EXT_SIZE_FORMAT "%d%s"
1701 #define EXT_SIZE_PARAMS(bytes) \
1702 byte_size_in_proper_unit((bytes)), \
1703 proper_unit_for_byte_size((bytes))
1705 void G1CollectorPolicy::print_heap_transition() {
1706 if (PrintGCDetails) {
1707 YoungList* young_list = _g1->young_list();
1708 size_t eden_bytes = young_list->eden_used_bytes();
1709 size_t survivor_bytes = young_list->survivor_used_bytes();
1710 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1711 size_t used = _g1->used();
1712 size_t capacity = _g1->capacity();
1713 size_t eden_capacity =
1714 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1716 gclog_or_tty->print_cr(
1717 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
1718 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1719 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1720 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1721 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1722 EXT_SIZE_PARAMS(_prev_eden_capacity),
1723 EXT_SIZE_PARAMS(eden_bytes),
1724 EXT_SIZE_PARAMS(eden_capacity),
1725 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1726 EXT_SIZE_PARAMS(survivor_bytes),
1727 EXT_SIZE_PARAMS(used_before_gc),
1728 EXT_SIZE_PARAMS(_capacity_before_gc),
1729 EXT_SIZE_PARAMS(used),
1730 EXT_SIZE_PARAMS(capacity));
1732 _prev_eden_capacity = eden_capacity;
1733 } else if (PrintGC) {
1734 _g1->print_size_transition(gclog_or_tty,
1735 _cur_collection_pause_used_at_start_bytes,
1736 _g1->used(), _g1->capacity());
1737 }
1738 }
1740 // <NEW PREDICTION>
1742 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1743 double update_rs_processed_buffers,
1744 double goal_ms) {
1745 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1746 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1748 if (G1UseAdaptiveConcRefinement) {
1749 const int k_gy = 3, k_gr = 6;
1750 const double inc_k = 1.1, dec_k = 0.9;
1752 int g = cg1r->green_zone();
1753 if (update_rs_time > goal_ms) {
1754 g = (int)(g * dec_k); // Can become 0. That's OK; it would mean mutator-only processing.
1755 } else {
1756 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1757 g = (int)MAX2(g * inc_k, g + 1.0);
1758 }
1759 }
1760 // Change the refinement threads params
1761 cg1r->set_green_zone(g);
1762 cg1r->set_yellow_zone(g * k_gy);
1763 cg1r->set_red_zone(g * k_gr);
1764 cg1r->reinitialize_threads();
1766 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1767 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1768 cg1r->yellow_zone());
1769 // Change the barrier params
1770 dcqs.set_process_completed_threshold(processing_threshold);
1771 dcqs.set_max_completed_queue(cg1r->red_zone());
1772 }
1774 int curr_queue_size = dcqs.completed_buffers_num();
1775 if (curr_queue_size >= cg1r->yellow_zone()) {
1776 dcqs.set_completed_queue_padding(curr_queue_size);
1777 } else {
1778 dcqs.set_completed_queue_padding(0);
1779 }
1780 dcqs.notify_if_necessary();
1781 }
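// Worked example (hypothetical numbers): with green_zone = 100 and a
// 10.0 ms goal, an update_rs_time of 12.0 ms shrinks the green zone to
// (int)(100 * 0.9) = 90, giving yellow = 270 and red = 540. Conversely,
// an 8.0 ms time with more than 100 processed buffers grows it to
// (int)MAX2(100 * 1.1, 101.0) = 110. Assuming sigma() returned 0.05,
// the processing threshold for the shrunk case would be
// 90 + MAX2((int)(90 * 0.05), 1) = 94, capped at the yellow zone.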
1783 double
1784 G1CollectorPolicy::
1785 predict_young_collection_elapsed_time_ms(size_t adjustment) {
1786 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
1788 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1789 size_t young_num = g1h->young_list()->length();
1790 if (young_num == 0)
1791 return 0.0;
1793 young_num += adjustment;
1794 size_t pending_cards = predict_pending_cards();
1795 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
1796 predict_rs_length_diff();
1797 size_t card_num;
1798 if (full_young_gcs())
1799 card_num = predict_young_card_num(rs_lengths);
1800 else
1801 card_num = predict_non_young_card_num(rs_lengths);
1802 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
1803 double accum_yg_surv_rate =
1804 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
1806 size_t bytes_to_copy =
1807 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
1809 return
1810 predict_rs_update_time_ms(pending_cards) +
1811 predict_rs_scan_time_ms(card_num) +
1812 predict_object_copy_time_ms(bytes_to_copy) +
1813 predict_young_other_time_ms(young_num) +
1814 predict_constant_other_time_ms();
1815 }
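// Worked example (hypothetical unit costs): 1000 pending cards at
// 0.005 ms/card, 3000 predicted scanned cards at 0.008 ms/card,
// 2,000,000 bytes to copy at 0.00003 ms/byte, 10 young regions at
// 0.15 ms/region of young other time, and 5.0 ms of constant other
// time predict 5.0 + 24.0 + 60.0 + 1.5 + 5.0 = 95.5 ms.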
1817 double
1818 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1819 size_t rs_length = predict_rs_length_diff();
1820 size_t card_num;
1821 if (full_young_gcs())
1822 card_num = predict_young_card_num(rs_length);
1823 else
1824 card_num = predict_non_young_card_num(rs_length);
1825 return predict_base_elapsed_time_ms(pending_cards, card_num);
1826 }
1828 double
1829 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1830 size_t scanned_cards) {
1831 return
1832 predict_rs_update_time_ms(pending_cards) +
1833 predict_rs_scan_time_ms(scanned_cards) +
1834 predict_constant_other_time_ms();
1835 }
1837 double
1838 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1839 bool young) {
1840 size_t rs_length = hr->rem_set()->occupied();
1841 size_t card_num;
1842 if (full_young_gcs())
1843 card_num = predict_young_card_num(rs_length);
1844 else
1845 card_num = predict_non_young_card_num(rs_length);
1846 size_t bytes_to_copy = predict_bytes_to_copy(hr);
1848 double region_elapsed_time_ms =
1849 predict_rs_scan_time_ms(card_num) +
1850 predict_object_copy_time_ms(bytes_to_copy);
1852 if (young)
1853 region_elapsed_time_ms += predict_young_other_time_ms(1);
1854 else
1855 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1857 return region_elapsed_time_ms;
1858 }
1860 size_t
1861 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1862 size_t bytes_to_copy;
1863 if (hr->is_marked())
1864 bytes_to_copy = hr->max_live_bytes();
1865 else {
1866 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
1867 "invariant" );
1868 int age = hr->age_in_surv_rate_group();
1869 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1870 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1871 }
1873 return bytes_to_copy;
1874 }
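// For illustration (hypothetical numbers): an unmarked young region
// with used() = 1,048,576 bytes and a predicted survival rate of 0.25
// for its age yields bytes_to_copy = 262,144, whereas a marked region
// simply reports its max_live_bytes().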
1876 void
1877 G1CollectorPolicy::start_recording_regions() {
1878 _recorded_rs_lengths = 0;
1879 _recorded_young_regions = 0;
1880 _recorded_non_young_regions = 0;
1882 #if PREDICTIONS_VERBOSE
1883 _recorded_marked_bytes = 0;
1884 _recorded_young_bytes = 0;
1885 _predicted_bytes_to_copy = 0;
1886 _predicted_rs_lengths = 0;
1887 _predicted_cards_scanned = 0;
1888 #endif // PREDICTIONS_VERBOSE
1889 }
1891 void
1892 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
1893 #if PREDICTIONS_VERBOSE
1894 if (!young) {
1895 _recorded_marked_bytes += hr->max_live_bytes();
1896 }
1897 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
1898 #endif // PREDICTIONS_VERBOSE
1900 size_t rs_length = hr->rem_set()->occupied();
1901 _recorded_rs_lengths += rs_length;
1902 }
1904 void
1905 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
1906 assert(!hr->is_young(), "should not call this");
1907 ++_recorded_non_young_regions;
1908 record_cset_region_info(hr, false);
1909 }
1911 void
1912 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
1913 _recorded_young_regions = n_regions;
1914 }
1916 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
1917 #if PREDICTIONS_VERBOSE
1918 _recorded_young_bytes = bytes;
1919 #endif // PREDICTIONS_VERBOSE
1920 }
1922 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1923 _recorded_rs_lengths = rs_lengths;
1924 }
1926 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
1927 _predicted_bytes_to_copy = bytes;
1928 }
1930 void
1931 G1CollectorPolicy::end_recording_regions() {
1932 // The _predicted_pause_time_ms field is referenced in code
1933 // not under PREDICTIONS_VERBOSE. Let's initialize it.
1934 _predicted_pause_time_ms = -1.0;
1936 #if PREDICTIONS_VERBOSE
1937 _predicted_pending_cards = predict_pending_cards();
1938 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
1939 if (full_young_gcs())
1940 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
1941 else
1942 _predicted_cards_scanned +=
1943 predict_non_young_card_num(_predicted_rs_lengths);
1944 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
1946 _predicted_rs_update_time_ms =
1947 predict_rs_update_time_ms(_g1->pending_card_num());
1948 _predicted_rs_scan_time_ms =
1949 predict_rs_scan_time_ms(_predicted_cards_scanned);
1950 _predicted_object_copy_time_ms =
1951 predict_object_copy_time_ms(_predicted_bytes_to_copy);
1952 _predicted_constant_other_time_ms =
1953 predict_constant_other_time_ms();
1954 _predicted_young_other_time_ms =
1955 predict_young_other_time_ms(_recorded_young_regions);
1956 _predicted_non_young_other_time_ms =
1957 predict_non_young_other_time_ms(_recorded_non_young_regions);
1959 _predicted_pause_time_ms =
1960 _predicted_rs_update_time_ms +
1961 _predicted_rs_scan_time_ms +
1962 _predicted_object_copy_time_ms +
1963 _predicted_constant_other_time_ms +
1964 _predicted_young_other_time_ms +
1965 _predicted_non_young_other_time_ms;
1966 #endif // PREDICTIONS_VERBOSE
1967 }
1969 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1970 predicted_time_ms) {
1971 // I don't think we need to do this when in young GC mode since
1972 // marking will be initiated next time we hit the soft limit anyway...
1973 if (predicted_time_ms > _expensive_region_limit_ms) {
1974 ergo_verbose2(ErgoPartiallyYoungGCs,
1975 "request partially-young GCs end",
1976 ergo_format_reason("predicted region time higher than threshold")
1977 ergo_format_ms("predicted region time")
1978 ergo_format_ms("threshold"),
1979 predicted_time_ms, _expensive_region_limit_ms);
1980 // no point in doing another partial one
1981 _should_revert_to_full_young_gcs = true;
1982 }
1983 }
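// For illustration (hypothetical numbers): with MaxGCPauseMillis = 200
// and a predicted constant other time of 6.0 ms, the limit is 194.0 ms,
// so a region predicted at 210.0 ms would request that partially-young
// GCs end.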
1985 // </NEW PREDICTION>
1988 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1989 double elapsed_ms) {
1990 _recent_gc_times_ms->add(elapsed_ms);
1991 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1992 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1993 }
1995 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1996 if (_recent_pause_times_ms->num() == 0) {
1997 return (double) MaxGCPauseMillis;
1998 }
1999 return _recent_pause_times_ms->avg();
2000 }
2002 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
2003 if (_recent_rs_scan_times_ms->num() == 0) {
2004 return (double)MaxGCPauseMillis/3.0;
2005 }
2006 return _recent_rs_scan_times_ms->avg();
2007 }
2009 int G1CollectorPolicy::number_of_recent_gcs() {
2010 assert(_recent_rs_scan_times_ms->num() ==
2011 _recent_pause_times_ms->num(), "Sequence out of sync");
2012 assert(_recent_pause_times_ms->num() ==
2013 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
2014 assert(_recent_CS_bytes_used_before->num() ==
2015 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
2017 return _recent_pause_times_ms->num();
2018 }
2020 double G1CollectorPolicy::recent_avg_survival_fraction() {
2021 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
2022 _recent_CS_bytes_used_before);
2023 }
2025 double G1CollectorPolicy::last_survival_fraction() {
2026 return last_survival_fraction_work(_recent_CS_bytes_surviving,
2027 _recent_CS_bytes_used_before);
2028 }
2030 double
2031 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
2032 TruncatedSeq* before) {
2033 assert(surviving->num() == before->num(), "Sequence out of sync");
2034 if (before->sum() > 0.0) {
2035 double recent_survival_rate = surviving->sum() / before->sum();
2036 // We exempt parallel collection from this check because Alloc Buffer
2037 // fragmentation can produce negative collections.
2038 // Further, we're now always doing parallel collection. But I'm still
2039 // leaving this here as a placeholder for a more precise assertion later.
2040 // (DLD, 10/05.)
2041 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
2042 _g1->evacuation_failed() ||
2043 recent_survival_rate <= 1.0, "Or bad frac");
2044 return recent_survival_rate;
2045 } else {
2046 return 1.0; // Be conservative.
2047 }
2048 }
2050 double
2051 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
2052 TruncatedSeq* before) {
2053 assert(surviving->num() == before->num(), "Sequence out of sync");
2054 if (surviving->num() > 0 && before->last() > 0.0) {
2055 double last_survival_rate = surviving->last() / before->last();
2056 // We exempt parallel collection from this check because Alloc Buffer
2057 // fragmentation can produce negative collections.
2058 // Further, we're now always doing parallel collection. But I'm still
2059 // leaving this here as a placeholder for a more precise assertion later.
2060 // (DLD, 10/05.)
2061 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
2062 last_survival_rate <= 1.0, "Or bad frac");
2063 return last_survival_rate;
2064 } else {
2065 return 1.0;
2066 }
2067 }
2069 static const int survival_min_obs = 5;
2070 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
2071 static const double min_survival_rate = 0.1;
2073 double
2074 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
2075 double latest) {
2076 double res = avg;
2077 if (number_of_recent_gcs() < survival_min_obs) {
2078 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
2079 }
2080 res = MAX2(res, latest);
2081 res = MAX2(res, min_survival_rate);
2082 // In the parallel case, LAB fragmentation can produce "negative
2083 // collections"; so can evac failure. Cap at 1.0
2084 res = MIN2(res, 1.0);
2085 return res;
2086 }
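// Worked example (hypothetical numbers): after 3 recent GCs with
// avg = 0.2 and latest = 0.25, the floor survival_min_obs_limits[3]
// = 0.3 applies, so the result is max(0.2, 0.3, 0.25, 0.1) = 0.3,
// capped at 1.0.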
2088 size_t G1CollectorPolicy::expansion_amount() {
2089 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
2090 double threshold = _gc_overhead_perc;
2091 if (recent_gc_overhead > threshold) {
2092 // We will double the existing space, or take
2093 // G1ExpandByPercentOfAvailable % of the available expansion
2094 // space, whichever is smaller, bounded below by a minimum
2095 // expansion (unless that's all that's left.)
2096 const size_t min_expand_bytes = 1*M;
2097 size_t reserved_bytes = _g1->max_capacity();
2098 size_t committed_bytes = _g1->capacity();
2099 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
2100 size_t expand_bytes;
2101 size_t expand_bytes_via_pct =
2102 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
2103 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
2104 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
2105 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
2107 ergo_verbose5(ErgoHeapSizing,
2108 "attempt heap expansion",
2109 ergo_format_reason("recent GC overhead higher than "
2110 "threshold after GC")
2111 ergo_format_perc("recent GC overhead")
2112 ergo_format_perc("threshold")
2113 ergo_format_byte("uncommitted")
2114 ergo_format_byte_perc("calculated expansion amount"),
2115 recent_gc_overhead, threshold,
2116 uncommitted_bytes,
2117 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
2119 return expand_bytes;
2120 } else {
2121 return 0;
2122 }
2123 }
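// Worked example (hypothetical numbers): with 1024 MB reserved and
// 256 MB committed (768 MB uncommitted), G1ExpandByPercentOfAvailable
// at 20 gives expand_bytes_via_pct of about 153.6 MB; that is below
// the committed-size cap and the uncommitted space and above the 1 MB
// minimum, so roughly 153.6 MB would be requested.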
2125 class CountCSClosure: public HeapRegionClosure {
2126 G1CollectorPolicy* _g1_policy;
2127 public:
2128 CountCSClosure(G1CollectorPolicy* g1_policy) :
2129 _g1_policy(g1_policy) {}
2130 bool doHeapRegion(HeapRegion* r) {
2131 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
2132 return false;
2133 }
2134 };
2136 void G1CollectorPolicy::count_CS_bytes_used() {
2137 CountCSClosure cs_closure(this);
2138 _g1->collection_set_iterate(&cs_closure);
2139 }
2141 void G1CollectorPolicy::print_summary (int level,
2142 const char* str,
2143 NumberSeq* seq) const {
2144 double sum = seq->sum();
2145 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
2146 str, sum / 1000.0, seq->avg());
2147 }
2149 void G1CollectorPolicy::print_summary_sd (int level,
2150 const char* str,
2151 NumberSeq* seq) const {
2152 print_summary(level, str, seq);
2153 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2154 seq->num(), seq->sd(), seq->maximum());
2155 }
2157 void G1CollectorPolicy::check_other_times(int level,
2158 NumberSeq* other_times_ms,
2159 NumberSeq* calc_other_times_ms) const {
2160 bool should_print = false;
2161 LineBuffer buf(level + 2);
2163 double max_sum = MAX2(fabs(other_times_ms->sum()),
2164 fabs(calc_other_times_ms->sum()));
2165 double min_sum = MIN2(fabs(other_times_ms->sum()),
2166 fabs(calc_other_times_ms->sum()));
2167 double sum_ratio = max_sum / min_sum;
2168 if (sum_ratio > 1.1) {
2169 should_print = true;
2170 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
2171 }
2173 double max_avg = MAX2(fabs(other_times_ms->avg()),
2174 fabs(calc_other_times_ms->avg()));
2175 double min_avg = MIN2(fabs(other_times_ms->avg()),
2176 fabs(calc_other_times_ms->avg()));
2177 double avg_ratio = max_avg / min_avg;
2178 if (avg_ratio > 1.1) {
2179 should_print = true;
2180 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
2181 }
2183 if (other_times_ms->sum() < -0.01) {
2184 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
2185 }
2187 if (other_times_ms->avg() < -0.01) {
2188 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
2189 }
2191 if (calc_other_times_ms->sum() < -0.01) {
2192 should_print = true;
2193 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
2194 }
2196 if (calc_other_times_ms->avg() < -0.01) {
2197 should_print = true;
2198 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
2199 }
2201 if (should_print)
2202 print_summary(level, "Other(Calc)", calc_other_times_ms);
2203 }
2205 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
2206 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
2207 MainBodySummary* body_summary = summary->main_body_summary();
2208 if (summary->get_total_seq()->num() > 0) {
2209 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
2210 if (body_summary != NULL) {
2211 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
2212 if (parallel) {
2213 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
2214 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
2215 print_summary(2, "Ext Root Scanning",
2216 body_summary->get_ext_root_scan_seq());
2217 print_summary(2, "Mark Stack Scanning",
2218 body_summary->get_mark_stack_scan_seq());
2219 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
2220 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
2221 print_summary(2, "Termination", body_summary->get_termination_seq());
2222 print_summary(2, "Other", body_summary->get_parallel_other_seq());
2223 {
2224 NumberSeq* other_parts[] = {
2225 body_summary->get_update_rs_seq(),
2226 body_summary->get_ext_root_scan_seq(),
2227 body_summary->get_mark_stack_scan_seq(),
2228 body_summary->get_scan_rs_seq(),
2229 body_summary->get_obj_copy_seq(),
2230 body_summary->get_termination_seq()
2231 };
2232 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
2233 6, other_parts);
2234 check_other_times(2, body_summary->get_parallel_other_seq(),
2235 &calc_other_times_ms);
2236 }
2237 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
2238 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
2239 } else {
2240 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
2241 print_summary(1, "Ext Root Scanning",
2242 body_summary->get_ext_root_scan_seq());
2243 print_summary(1, "Mark Stack Scanning",
2244 body_summary->get_mark_stack_scan_seq());
2245 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
2246 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
2247 }
2248 }
2249 print_summary(1, "Other", summary->get_other_seq());
2250 {
2251 if (body_summary != NULL) {
2252 NumberSeq calc_other_times_ms;
2253 if (parallel) {
2254 // parallel
2255 NumberSeq* other_parts[] = {
2256 body_summary->get_satb_drain_seq(),
2257 body_summary->get_parallel_seq(),
2258 body_summary->get_clear_ct_seq()
2259 };
2260 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2261 3, other_parts);
2262 } else {
2263 // serial
2264 NumberSeq* other_parts[] = {
2265 body_summary->get_satb_drain_seq(),
2266 body_summary->get_update_rs_seq(),
2267 body_summary->get_ext_root_scan_seq(),
2268 body_summary->get_mark_stack_scan_seq(),
2269 body_summary->get_scan_rs_seq(),
2270 body_summary->get_obj_copy_seq()
2271 };
2272 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2273 6, other_parts);
2274 }
2275 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
2276 }
2277 }
2278 } else {
2279 LineBuffer(1).append_and_print_cr("none");
2280 }
2281 LineBuffer(0).append_and_print_cr("");
2282 }
2284 void G1CollectorPolicy::print_tracing_info() const {
2285 if (TraceGen0Time) {
2286 gclog_or_tty->print_cr("ALL PAUSES");
2287 print_summary_sd(0, "Total", _all_pause_times_ms);
2288 gclog_or_tty->print_cr("");
2289 gclog_or_tty->print_cr("");
2290 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
2291 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
2292 gclog_or_tty->print_cr("");
2294 gclog_or_tty->print_cr("EVACUATION PAUSES");
2295 print_summary(_summary);
2297 gclog_or_tty->print_cr("MISC");
2298 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
2299 print_summary_sd(0, "Yields", _all_yield_times_ms);
2300 for (int i = 0; i < _aux_num; ++i) {
2301 if (_all_aux_times_ms[i].num() > 0) {
2302 char buffer[96];
2303 sprintf(buffer, "Aux%d", i);
2304 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
2305 }
2306 }
2308 size_t all_region_num = _region_num_young + _region_num_tenured;
2309 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
2310 "Tenured %8d (%6.2lf%%)",
2311 all_region_num,
2312 _region_num_young,
2313 (double) _region_num_young / (double) all_region_num * 100.0,
2314 _region_num_tenured,
2315 (double) _region_num_tenured / (double) all_region_num * 100.0);
2316 }
2317 if (TraceGen1Time) {
2318 if (_all_full_gc_times_ms->num() > 0) {
2319 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2320 _all_full_gc_times_ms->num(),
2321 _all_full_gc_times_ms->sum() / 1000.0);
2322 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
2323 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2324 _all_full_gc_times_ms->sd(),
2325 _all_full_gc_times_ms->maximum());
2326 }
2327 }
2328 }
2330 void G1CollectorPolicy::print_yg_surv_rate_info() const {
2331 #ifndef PRODUCT
2332 _short_lived_surv_rate_group->print_surv_rate_summary();
2333 // add this call for any other surv rate groups
2334 #endif // PRODUCT
2335 }
2337 void G1CollectorPolicy::update_region_num(bool young) {
2338 if (young) {
2339 ++_region_num_young;
2340 } else {
2341 ++_region_num_tenured;
2342 }
2343 }
2345 #ifndef PRODUCT
2346 // for debugging, bit of a hack...
2347 static char*
2348 region_num_to_mbs(int length) {
2349 static char buffer[64];
2350 double bytes = (double) (length * HeapRegion::GrainBytes);
2351 double mbs = bytes / (double) (1024 * 1024);
2352 sprintf(buffer, "%7.2lfMB", mbs);
2353 return buffer;
2354 }
2355 #endif // PRODUCT
2357 size_t G1CollectorPolicy::max_regions(int purpose) {
2358 switch (purpose) {
2359 case GCAllocForSurvived:
2360 return _max_survivor_regions;
2361 case GCAllocForTenured:
2362 return REGIONS_UNLIMITED;
2363 default:
2364 ShouldNotReachHere();
2365 return REGIONS_UNLIMITED;
2366 }
2367 }
2369 void G1CollectorPolicy::update_max_gc_locker_expansion() {
2370 size_t expansion_region_num = 0;
2371 if (GCLockerEdenExpansionPercent > 0) {
2372 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
2373 double expansion_region_num_d = perc * (double) _young_list_target_length;
2374 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
2375 // less than 1.0) we'll get 1.
2376 expansion_region_num = (size_t) ceil(expansion_region_num_d);
2377 } else {
2378 assert(expansion_region_num == 0, "sanity");
2379 }
2380 _young_list_max_length = _young_list_target_length + expansion_region_num;
2381 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
2382 }
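// For illustration (hypothetical numbers): a young list target of 30
// regions with GCLockerEdenExpansionPercent = 5 expands by
// ceil(30 * 0.05) = ceil(1.5) = 2 regions, so _young_list_max_length
// becomes 32.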
2384 // Calculates survivor space parameters.
2385 void G1CollectorPolicy::update_survivors_policy() {
2386 double max_survivor_regions_d =
2387 (double) _young_list_target_length / (double) SurvivorRatio;
2388 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
2389 // smaller than 1.0) we'll get 1.
2390 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
2392 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
2393 HeapRegion::GrainWords * _max_survivor_regions);
2394 }
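// For illustration (hypothetical numbers): a young list target of 60
// regions with SurvivorRatio = 8 gives ceil(60 / 8.0) = ceil(7.5) = 8
// maximum survivor regions; the tenuring threshold is then computed
// against 8 * HeapRegion::GrainWords of survivor space.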
2396 #ifndef PRODUCT
2397 class HRSortIndexIsOKClosure: public HeapRegionClosure {
2398 CollectionSetChooser* _chooser;
2399 public:
2400 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
2401 _chooser(chooser) {}
2403 bool doHeapRegion(HeapRegion* r) {
2404 if (!r->continuesHumongous()) {
2405 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
2406 }
2407 return false;
2408 }
2409 };
2411 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
2412 HRSortIndexIsOKClosure cl(_collectionSetChooser);
2413 _g1->heap_region_iterate(&cl);
2414 return true;
2415 }
2416 #endif
2418 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
2419 GCCause::Cause gc_cause) {
2420 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2421 if (!during_cycle) {
2422 ergo_verbose1(ErgoConcCycles,
2423 "request concurrent cycle initiation",
2424 ergo_format_reason("requested by GC cause")
2425 ergo_format_str("GC cause"),
2426 GCCause::to_string(gc_cause));
2427 set_initiate_conc_mark_if_possible();
2428 return true;
2429 } else {
2430 ergo_verbose1(ErgoConcCycles,
2431 "do not request concurrent cycle initiation",
2432 ergo_format_reason("concurrent cycle already in progress")
2433 ergo_format_str("GC cause"),
2434 GCCause::to_string(gc_cause));
2435 return false;
2436 }
2437 }
2439 void
2440 G1CollectorPolicy::decide_on_conc_mark_initiation() {
2441 // We are about to decide on whether this pause will be an
2442 // initial-mark pause.
2444 // First, during_initial_mark_pause() should not be already set. We
2445 // will set it here if we have to. However, it should be cleared by
2446 // the end of the pause (it's only set for the duration of an
2447 // initial-mark pause).
2448 assert(!during_initial_mark_pause(), "pre-condition");
2450 if (initiate_conc_mark_if_possible()) {
2451 // We had noticed on a previous pause that the heap occupancy has
2452 // gone over the initiating threshold and we should start a
2453 // concurrent marking cycle. So we might initiate one.
2455 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2456 if (!during_cycle) {
2457 // The concurrent marking thread is not "during a cycle", i.e.,
2458 // it has completed the last one. So we can go ahead and
2459 // initiate a new cycle.
2461 set_during_initial_mark_pause();
2462 // We do not allow non-full young GCs during marking.
2463 if (!full_young_gcs()) {
2464 set_full_young_gcs(true);
2465 ergo_verbose0(ErgoPartiallyYoungGCs,
2466 "end partially-young GCs",
2467 ergo_format_reason("concurrent cycle is about to start"));
2468 }
2470 // And we can now clear initiate_conc_mark_if_possible() as
2471 // we've already acted on it.
2472 clear_initiate_conc_mark_if_possible();
2474 ergo_verbose0(ErgoConcCycles,
2475 "initiate concurrent cycle",
2476 ergo_format_reason("concurrent cycle initiation requested"));
2477 } else {
2478 // The concurrent marking thread is still finishing up the
2479 // previous cycle. If we start one right now the two cycles
2480 // overlap. In particular, the concurrent marking thread might
2481 // be in the process of clearing the next marking bitmap (which
2482 // we will use for the next cycle if we start one). Starting a
2483 // cycle now will be bad given that parts of the marking
2484 // information might get cleared by the marking thread. And we
2485 // cannot wait for the marking thread to finish the cycle as it
2486 // periodically yields while clearing the next marking bitmap
2487 // and, if it's in a yield point, it's waiting for us to
2488 // finish. So, at this point we will not start a cycle and we'll
2489 // let the concurrent marking thread complete the last one.
2490 ergo_verbose0(ErgoConcCycles,
2491 "do not initiate concurrent cycle",
2492 ergo_format_reason("concurrent cycle already in progress"));
2493 }
2494 }
2495 }
2497 class KnownGarbageClosure: public HeapRegionClosure {
2498 CollectionSetChooser* _hrSorted;
2500 public:
2501 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
2502 _hrSorted(hrSorted)
2503 {}
2505 bool doHeapRegion(HeapRegion* r) {
2506 // We only include humongous regions in collection
2507 // sets when concurrent mark shows that their contained object is
2508 // unreachable.
2510 // Do we have any marking information for this region?
2511 if (r->is_marked()) {
2512 // We don't include humongous regions in collection
2513 // sets because we collect them immediately at the end of a marking
2514 // cycle. We also don't include young regions because we *must*
2515 // include them in the next collection pause.
2516 if (!r->isHumongous() && !r->is_young()) {
2517 _hrSorted->addMarkedHeapRegion(r);
2518 }
2519 }
2520 return false;
2521 }
2522 };
2524 class ParKnownGarbageHRClosure: public HeapRegionClosure {
2525 CollectionSetChooser* _hrSorted;
2526 jint _marked_regions_added;
2527 jint _chunk_size;
2528 jint _cur_chunk_idx;
2529 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
2530 int _worker;
2531 int _invokes;
2533 void get_new_chunk() {
2534 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
2535 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
2536 }
2537 void add_region(HeapRegion* r) {
2538 if (_cur_chunk_idx == _cur_chunk_end) {
2539 get_new_chunk();
2540 }
2541 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
2542 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
2543 _marked_regions_added++;
2544 _cur_chunk_idx++;
2545 }
2547 public:
2548 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
2549 jint chunk_size,
2550 int worker) :
2551 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
2552 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
2553 _invokes(0)
2554 {}
2556 bool doHeapRegion(HeapRegion* r) {
2557 // We only include humongous regions in collection
2558 // sets when concurrent mark shows that their contained object is
2559 // unreachable.
2560 _invokes++;
2562 // Do we have any marking information for this region?
2563 if (r->is_marked()) {
2564 // We don't include humongous regions in collection
2565 // sets because we collect them immediately at the end of a marking
2566 // cycle.
2567 // We also do not include young regions in collection sets
2568 if (!r->isHumongous() && !r->is_young()) {
2569 add_region(r);
2570 }
2571 }
2572 return false;
2573 }
2574 jint marked_regions_added() { return _marked_regions_added; }
2575 int invokes() { return _invokes; }
2576 };
2578 class ParKnownGarbageTask: public AbstractGangTask {
2579 CollectionSetChooser* _hrSorted;
2580 jint _chunk_size;
2581 G1CollectedHeap* _g1;
2582 public:
2583 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
2584 AbstractGangTask("ParKnownGarbageTask"),
2585 _hrSorted(hrSorted), _chunk_size(chunk_size),
2586 _g1(G1CollectedHeap::heap())
2587 {}
2589 void work(int i) {
2590 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
2591 // Back to zero for the claim value.
2592 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
2593 HeapRegion::InitialClaimValue);
2594 jint regions_added = parKnownGarbageCl.marked_regions_added();
2595 _hrSorted->incNumMarkedHeapRegions(regions_added);
2596 if (G1PrintParCleanupStats) {
2597 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
2598 i, parKnownGarbageCl.invokes(), regions_added);
2599 }
2600 }
2601 };
2603 void
2604 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
2605 double start_sec;
2606 if (G1PrintParCleanupStats) {
2607 start_sec = os::elapsedTime();
2608 }
2610 _collectionSetChooser->clearMarkedHeapRegions();
2611 double clear_marked_end_sec;
2612 if (G1PrintParCleanupStats) {
2613 clear_marked_end_sec = os::elapsedTime();
2614 gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
2615 (clear_marked_end_sec - start_sec) * 1000.0);
2616 }
2618 if (G1CollectedHeap::use_parallel_gc_threads()) {
2619 const size_t OverpartitionFactor = 4;
2620 const size_t MinWorkUnit = 8;
2621 const size_t WorkUnit =
2622 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
2623 MinWorkUnit);
2624 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
2625 WorkUnit);
2626 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2627 (int) WorkUnit);
2628 _g1->workers()->run_task(&parKnownGarbageTask);
2630 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2631 "sanity check");
2632 } else {
2633 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2634 _g1->heap_region_iterate(&knownGarbagecl);
2635 }
2636 double known_garbage_end_sec;
2637 if (G1PrintParCleanupStats) {
2638 known_garbage_end_sec = os::elapsedTime();
2639 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
2640 (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
2641 }
2643 _collectionSetChooser->sortMarkedHeapRegions();
2644 double end_sec = os::elapsedTime();
2645 if (G1PrintParCleanupStats) {
2646 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
2647 (end_sec - known_garbage_end_sec) * 1000.0);
2648 }
2650 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
2651 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
2652 _cur_mark_stop_world_time_ms += elapsed_time_ms;
2653 _prev_collection_pause_end_ms += elapsed_time_ms;
2654 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
2655 }
2657 // Add the heap region at the head of the non-incremental collection set
2658 void G1CollectorPolicy::
2659 add_to_collection_set(HeapRegion* hr) {
2660 assert(_inc_cset_build_state == Active, "Precondition");
2661 assert(!hr->is_young(), "non-incremental add of young region");
2663 if (_g1->mark_in_progress())
2664 _g1->concurrent_mark()->registerCSetRegion(hr);
2666 assert(!hr->in_collection_set(), "should not already be in the CSet");
2667 hr->set_in_collection_set(true);
2668 hr->set_next_in_collection_set(_collection_set);
2669 _collection_set = hr;
2670 _collection_set_size++;
2671 _collection_set_bytes_used_before += hr->used();
2672 _g1->register_region_with_in_cset_fast_test(hr);
2673 }
2675 // Initialize the per-collection-set information
2676 void G1CollectorPolicy::start_incremental_cset_building() {
2677 assert(_inc_cset_build_state == Inactive, "Precondition");
2679 _inc_cset_head = NULL;
2680 _inc_cset_tail = NULL;
2681 _inc_cset_size = 0;
2682 _inc_cset_bytes_used_before = 0;
2684 _inc_cset_young_index = 0;
2686 _inc_cset_max_finger = 0;
2687 _inc_cset_recorded_young_bytes = 0;
2688 _inc_cset_recorded_rs_lengths = 0;
2689 _inc_cset_predicted_elapsed_time_ms = 0;
2690 _inc_cset_predicted_bytes_to_copy = 0;
2691 _inc_cset_build_state = Active;
2692 }
2694 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2695 // This routine is used when:
2696 // * adding survivor regions to the incremental cset at the end of an
2697 // evacuation pause,
2698 // * adding the current allocation region to the incremental cset
2699 // when it is retired, and
2700 // * updating existing policy information for a region in the
2701 // incremental cset via young list RSet sampling.
2702 // Therefore this routine may be called at a safepoint by the
2703 // VM thread, or in-between safepoints by mutator threads (when
2704 // retiring the current allocation region) or a concurrent
2705 // refine thread (RSet sampling).
2707 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2708 size_t used_bytes = hr->used();
2710 _inc_cset_recorded_rs_lengths += rs_length;
2711 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
2713 _inc_cset_bytes_used_before += used_bytes;
2715 // Cache the values we have added to the aggregated information
2716 // in the heap region in case we have to remove this region from
2717 // the incremental collection set, or it is updated by the
2718 // rset sampling code
2719 hr->set_recorded_rs_length(rs_length);
2720 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
2722 #if PREDICTIONS_VERBOSE
2723 size_t bytes_to_copy = predict_bytes_to_copy(hr);
2724 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
2726 // Record the number of bytes used in this region
2727 _inc_cset_recorded_young_bytes += used_bytes;
2729 // Cache the values we have added to the aggregated information
2730 // in the heap region in case we have to remove this region from
2731 // the incremental collection set, or it is updated by the
2732 // rset sampling code
2733 hr->set_predicted_bytes_to_copy(bytes_to_copy);
2734 #endif // PREDICTIONS_VERBOSE
2735 }
2737 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
2738 // This routine is currently only called as part of the updating of
2739 // existing policy information for regions in the incremental cset that
2740 // is performed by the concurrent refine thread(s) as part of young list
2741 // RSet sampling. Therefore we should not be at a safepoint.
2743 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
2744 assert(hr->is_young(), "it should be");
2746 size_t used_bytes = hr->used();
2747 size_t old_rs_length = hr->recorded_rs_length();
2748 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
2750 // Subtract the old recorded/predicted policy information for
2751 // the given heap region from the collection set info.
2752 _inc_cset_recorded_rs_lengths -= old_rs_length;
2753 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
2755 _inc_cset_bytes_used_before -= used_bytes;
2757 // Clear the values cached in the heap region
2758 hr->set_recorded_rs_length(0);
2759 hr->set_predicted_elapsed_time_ms(0);
2761 #if PREDICTIONS_VERBOSE
2762 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
2763 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
2765 // Subtract the number of bytes used in this region
2766 _inc_cset_recorded_young_bytes -= used_bytes;
2768 // Clear the values cached in the heap region
2769 hr->set_predicted_bytes_to_copy(0);
2770 #endif // PREDICTIONS_VERBOSE
2771 }
2773 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
2774 // Update the collection set information that is dependent on the new RS length
2775 assert(hr->is_young(), "Precondition");
2777 remove_from_incremental_cset_info(hr);
2778 add_to_incremental_cset_info(hr, new_rs_length);
2779 }
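// For illustration (hypothetical numbers): if a region was recorded
// with an RSet length of 100 and a predicted time of 2.0 ms, and
// sampling now sees a length of 150 predicting, say, 2.6 ms, the
// remove/add pair leaves the aggregate rs_lengths 50 entries higher
// and the aggregate predicted time 0.6 ms higher; used_bytes is
// subtracted and re-added unchanged.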
2781 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
2782 assert( hr->is_young(), "invariant");
2783 assert( hr->young_index_in_cset() == -1, "invariant" );
2784 assert(_inc_cset_build_state == Active, "Precondition");
2786 // We need to clear and set the cached recorded collection set
2787 // information in the heap region here (before the region gets added
2788 // to the collection set). An individual heap region's cached values
2789 // are calculated, aggregated with the policy collection set info,
2790 // and cached in the heap region here (initially) and (subsequently)
2791 // by the Young List sampling code.
2793 size_t rs_length = hr->rem_set()->occupied();
2794 add_to_incremental_cset_info(hr, rs_length);
2796 HeapWord* hr_end = hr->end();
2797 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
2799 assert(!hr->in_collection_set(), "invariant");
2800 hr->set_in_collection_set(true);
2801 assert( hr->next_in_collection_set() == NULL, "invariant");
2803 _inc_cset_size++;
2804 _g1->register_region_with_in_cset_fast_test(hr);
2806 hr->set_young_index_in_cset((int) _inc_cset_young_index);
2807 ++_inc_cset_young_index;
2808 }
2810 // Add the region at the RHS of the incremental cset
2811 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
2812 // We should only ever be appending survivors at the end of a pause
2813 assert( hr->is_survivor(), "Logic");
2815 // Do the 'common' stuff
2816 add_region_to_incremental_cset_common(hr);
2818 // Now add the region at the right hand side
2819 if (_inc_cset_tail == NULL) {
2820 assert(_inc_cset_head == NULL, "invariant");
2821 _inc_cset_head = hr;
2822 } else {
2823 _inc_cset_tail->set_next_in_collection_set(hr);
2824 }
2825 _inc_cset_tail = hr;
2826 }
2828 // Add the region to the LHS of the incremental cset
2829 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
2830 // Survivors should be added to the RHS at the end of a pause
2831 assert(!hr->is_survivor(), "Logic");
2833 // Do the 'common' stuff
2834 add_region_to_incremental_cset_common(hr);
2836 // Add the region at the left hand side
2837 hr->set_next_in_collection_set(_inc_cset_head);
2838 if (_inc_cset_head == NULL) {
2839 assert(_inc_cset_tail == NULL, "Invariant");
2840 _inc_cset_tail = hr;
2841 }
2842 _inc_cset_head = hr;
2843 }
2845 #ifndef PRODUCT
2846 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2847 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2849 st->print_cr("\nCollection_set:");
2850 HeapRegion* csr = list_head;
2851 while (csr != NULL) {
2852 HeapRegion* next = csr->next_in_collection_set();
2853 assert(csr->in_collection_set(), "bad CS");
2854 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
2855 "age: %4d, y: %d, surv: %d",
2856 csr->bottom(), csr->end(),
2857 csr->top(),
2858 csr->prev_top_at_mark_start(),
2859 csr->next_top_at_mark_start(),
2860 csr->top_at_conc_mark_count(),
2861 csr->age_in_surv_rate_group_cond(),
2862 csr->is_young(),
2863 csr->is_survivor());
2864 csr = next;
2865 }
2866 }
2867 #endif // !PRODUCT
2869 void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
2870 // Set this here - in case we're not doing young collections.
2871 double non_young_start_time_sec = os::elapsedTime();
2873 YoungList* young_list = _g1->young_list();
2875 start_recording_regions();
2877 guarantee(target_pause_time_ms > 0.0,
2878 err_msg("target_pause_time_ms = %1.6lf should be positive",
2879 target_pause_time_ms));
2880 guarantee(_collection_set == NULL, "Precondition");
2882 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2883 double predicted_pause_time_ms = base_time_ms;
2885 double time_remaining_ms = target_pause_time_ms - base_time_ms;
2887 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2888 "start choosing CSet",
2889 ergo_format_ms("predicted base time")
2890 ergo_format_ms("remaining time")
2891 ergo_format_ms("target pause time"),
2892 base_time_ms, time_remaining_ms, target_pause_time_ms);
2894 // the 10% and 50% values are arbitrary...
2895 double threshold = 0.10 * target_pause_time_ms;
2896 if (time_remaining_ms < threshold) {
2897 double prev_time_remaining_ms = time_remaining_ms;
2898 time_remaining_ms = 0.50 * target_pause_time_ms;
2899 _within_target = false;
2900 ergo_verbose3(ErgoCSetConstruction,
2901 "adjust remaining time",
2902 ergo_format_reason("remaining time lower than threshold")
2903 ergo_format_ms("remaining time")
2904 ergo_format_ms("threshold")
2905 ergo_format_ms("adjusted remaining time"),
2906 prev_time_remaining_ms, threshold, time_remaining_ms);
2907 } else {
2908 _within_target = true;
2909 }
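// Worked example (hypothetical numbers): a 200.0 ms target gives a
// 20.0 ms threshold; a predicted base time of 190.0 ms leaves only
// 10.0 ms, so the budget is reset to 0.50 * 200.0 = 100.0 ms and the
// pause is flagged as outside the target.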
2911 size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
2913 HeapRegion* hr;
2914 double young_start_time_sec = os::elapsedTime();
2916 _collection_set_bytes_used_before = 0;
2917 _collection_set_size = 0;
2918 _young_cset_length = 0;
2919 _last_young_gc_full = full_young_gcs();
2921 if (_last_young_gc_full) {
2922 ++_full_young_pause_num;
2923 } else {
2924 ++_partial_young_pause_num;
2925 }
2927 // The young list is laid out with the survivor regions from the
2928 // previous pause appended to the RHS of the young list, i.e.
2929 // [Newly Young Regions ++ Survivors from last pause].
2931 size_t survivor_region_num = young_list->survivor_length();
2932 size_t eden_region_num = young_list->length() - survivor_region_num;
2933 size_t old_region_num = 0;
2934 hr = young_list->first_survivor_region();
2935 while (hr != NULL) {
2936 assert(hr->is_survivor(), "badly formed young list");
2937 hr->set_young();
2938 hr = hr->get_next_young_region();
2939 }
2941 // Clear the fields that point to the survivor list - they are all young now.
2942 young_list->clear_survivors();
2944 if (_g1->mark_in_progress())
2945 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
2947 _young_cset_length = _inc_cset_young_index;
2948 _collection_set = _inc_cset_head;
2949 _collection_set_size = _inc_cset_size;
2950 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2951 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
2952 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
2954 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2955 "add young regions to CSet",
2956 ergo_format_region("eden")
2957 ergo_format_region("survivors")
2958 ergo_format_ms("predicted young region time"),
2959 eden_region_num, survivor_region_num,
2960 _inc_cset_predicted_elapsed_time_ms);
2962 // The number of recorded young regions is the incremental
2963 // collection set's current size
2964 set_recorded_young_regions(_inc_cset_size);
2965 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2966 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
2967 #if PREDICTIONS_VERBOSE
2968 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
2969 #endif // PREDICTIONS_VERBOSE
2971 assert(_inc_cset_size == young_list->length(), "Invariant");
2973 double young_end_time_sec = os::elapsedTime();
2974 _recorded_young_cset_choice_time_ms =
2975 (young_end_time_sec - young_start_time_sec) * 1000.0;
2977 // We are doing young collections so reset this.
2978 non_young_start_time_sec = young_end_time_sec;
2980 if (!full_young_gcs()) {
2981 bool should_continue = true;
2982 NumberSeq seq;
2983 double avg_prediction = 100000000000000000.0; // something very large
2985 size_t prev_collection_set_size = _collection_set_size;
2986 double prev_predicted_pause_time_ms = predicted_pause_time_ms;
2987 do {
2988 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2989 avg_prediction);
2990 if (hr != NULL) {
2991 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2992 time_remaining_ms -= predicted_time_ms;
2993 predicted_pause_time_ms += predicted_time_ms;
2994 add_to_collection_set(hr);
2995 record_non_young_cset_region(hr);
2996 seq.add(predicted_time_ms);
2997 avg_prediction = seq.avg() + seq.sd();
2998 }
3000 should_continue = true;
3001 if (hr == NULL) {
3002 // No need for an ergo verbose message here,
3003 // getNextMarkedRegion() does this when it returns NULL.
3004 should_continue = false;
3005 } else {
3006 if (adaptive_young_list_length()) {
3007 if (time_remaining_ms < 0.0) {
3008 ergo_verbose1(ErgoCSetConstruction,
3009 "stop adding old regions to CSet",
3010 ergo_format_reason("remaining time is lower than 0")
3011 ergo_format_ms("remaining time"),
3012 time_remaining_ms);
3013 should_continue = false;
3014 }
3015 } else {
3016 if (_collection_set_size >= _young_list_fixed_length) {
3017 ergo_verbose2(ErgoCSetConstruction,
3018 "stop adding old regions to CSet",
3019 ergo_format_reason("CSet length reached target")
3020 ergo_format_region("CSet")
3021 ergo_format_region("young target"),
3022 _collection_set_size, _young_list_fixed_length);
3023 should_continue = false;
3024 }
3025 }
3026 }
3027 } while (should_continue);
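// For illustration (hypothetical numbers): after adding old regions
// predicted at 4.0, 6.0 and 5.0 ms, avg_prediction becomes their
// 5.0 ms average plus one standard deviation and is passed back to
// the chooser, together with the shrinking time_remaining_ms, to
// bound what the next candidate region may cost.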
3029 if (!adaptive_young_list_length() &&
3030 _collection_set_size < _young_list_fixed_length) {
3031 ergo_verbose2(ErgoCSetConstruction,
3032 "request partially-young GCs end",
3033 ergo_format_reason("CSet length lower than target")
3034 ergo_format_region("CSet")
3035 ergo_format_region("young target"),
3036 _collection_set_size, _young_list_fixed_length);
3037 _should_revert_to_full_young_gcs = true;
3038 }
3040 old_region_num = _collection_set_size - prev_collection_set_size;
3042 ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
3043 "add old regions to CSet",
3044 ergo_format_region("old")
3045 ergo_format_ms("predicted old region time"),
3046 old_region_num,
3047 predicted_pause_time_ms - prev_predicted_pause_time_ms);
3048 }
3050 stop_incremental_cset_building();
3052 count_CS_bytes_used();
3054 end_recording_regions();
3056 ergo_verbose5(ErgoCSetConstruction,
3057 "finish choosing CSet",
3058 ergo_format_region("eden")
3059 ergo_format_region("survivors")
3060 ergo_format_region("old")
3061 ergo_format_ms("predicted pause time")
3062 ergo_format_ms("target pause time"),
3063 eden_region_num, survivor_region_num, old_region_num,
3064 predicted_pause_time_ms, target_pause_time_ms);
3066 double non_young_end_time_sec = os::elapsedTime();
3067 _recorded_non_young_cset_choice_time_ms =
3068 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
3069 }