Wed, 11 Sep 2013 00:38:18 -0400
8024256: Minimal VM build is broken with PCH disabled
Reviewed-by: coleenp, twisti
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase Metaspace size limit above specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}
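
// Returns the current value of _should_clear_all_soft_refs and resets the
// flag, so a request to clear all SoftReferences is honored at most once.
// Note that the 'v' argument is currently unused.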
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}
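
// Create the remembered set for the whole heap. Only a card table based
// remembered set (CardTableRS) is currently supported.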
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

// GenCollectorPolicy methods.
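
// Compute the young generation's share of base_size implied by NewRatio:
// base_size / (NewRatio + 1), aligned down to min_alignment(). For example,
// with NewRatio=2 a 96M base size yields a 32M young generation.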
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                     align_size_down(x, min_alignment()) :
                     min_alignment();
  return new_gen_size;
}
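
// Bound desired_size by maximum_size minus one alignment unit, leaving at
// least one min_alignment()-sized chunk of the heap for the other generation.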
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}
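
// Create the adaptive size policy that resizes the eden, survivor and
// promotion areas at runtime, driven by the MaxGCPauseMillis and
// GCTimeRatio goals.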
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes of heap correspond to
  // one byte of entry and the os page size is 4096, the maximum heap size
  // should be aligned to 512*4096 = 2MB.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware.
    alignment = lcm(os::large_page_size(), alignment);
  }

  assert(alignment >= min_alignment(), "Must be");

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // Adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment(). In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // Need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.  This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));
  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}
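
// Satisfy an allocation request of 'size' words. Loops over a lock-free
// fast-path allocation in gen0, a locked slow-path allocation over all
// generations, and allocation-triggered GCs (via a VM operation) until the
// request succeeds or memory is exhausted. Sets
// *gc_overhead_limit_was_exceeded when the GC time limit (together with
// cleared SoftReferences) forces a NULL result, so the caller can throw
// an out-of-memory error.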
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
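
// Try to satisfy the allocation by expanding a generation rather than by
// collecting, starting with the oldest generation willing to accept an
// allocation of this size. Returns NULL if no generation could be expanded
// enough to hold 'size' words.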
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}
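
// Called when an allocation has failed even after a GC: escalate from heap
// expansion (if a GC is locked out), to an incremental collection, to a full
// collection, and finally to a maximally compacting collection that clears
// all SoftReferences. Returns NULL only if every attempt fails.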
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
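
// Satisfy a failed metadata allocation, looping over GC attempts. While the
// GC locker is active, only metaspace expansion is attempted and the thread
// stalls (unless it is itself in a JNI critical section). Otherwise a
// collection is scheduled via a VM operation, since a full GC can unload
// classes and thereby free metaspace.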
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
//  . the allocation won't fit into the current young gen heap
//  . gc locker is occupied (jni critical section)
//  . heap memory is tight -- the most recent previous collection
//    was a full collection because a partial collection (would
//    have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return   (word_size > heap_word_size(gen0_capacity))
        || GC_locker::is_active_and_needs_gc()
        || gch->incremental_collection_failed();
}

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}
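
// Set up the generation array: a young generation (ParNew when
// -XX:+UseParNewGC is set, DefNew otherwise) and a mark-sweep-compact
// old generation.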
void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}