Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase Metaspace size limit above specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

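// Returns the current value of _should_clear_all_soft_refs and resets the
// flag to false for the next cycle; note that the boolean argument is not
// consulted.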
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

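// The card table is the only remembered-set implementation recognized here;
// any other GenRemSet::Name fails the guarantee below.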
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

// GenCollectorPolicy methods.

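// For illustration (hypothetical values): with NewRatio = 2 the young
// generation gets a 1/(NewRatio+1) share of the base size, so a 96M
// base_size yields 96M / 3 = 32M, rounded down to min_alignment(); a share
// smaller than min_alignment() is rounded up to exactly min_alignment().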
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}

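// Caps a desired generation size at one alignment unit below maximum_size,
// so whatever occupies the remainder of the total is always left at least
// min_alignment() bytes.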
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes of heap correspond to a
  // 1 byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set parameters.
// Ergonomic choice of parameters is done before this
// method is called. Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k). A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size. For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap. The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap. This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape. It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices. The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it. If it is used, also use it
      // to set the initial size. Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
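// For illustration (hypothetical values): with an explicit OldSize of 64M,
// a 96M heap_size and a 48M *gen0_size_ptr, gen0 + OldSize overflows the
// heap, so gen0 is cut back to 96M - 64M = 32M (aligned down) and the
// explicit OldSize is respected.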
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
        (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

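// Satisfy an allocation request: try a lock-free allocation in gen0 first,
// then a locked slow-path attempt across the generations, stalling on the
// GC locker or scheduling a VM_GenCollectForAllocation as needed, and loop
// until the request either succeeds or must fail with NULL.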
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before; // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL; // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

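// Try to satisfy the allocation by expanding a generation, walking from the
// oldest generation down to the youngest and expanding the first one that
// is willing to take the allocation.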
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

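// Escalating response to an allocation failure: if the GC locker blocks
// collection, only expand; otherwise try an incremental collection, or a
// full collection if an incremental one would fail; as a last resort run a
// full collection that clears all soft references and fully compacts the
// heap before giving up.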
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result; // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false   /* full */,
                       false   /* clear_all_soft_refs */,
                       size    /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true    /* full */,
                       false   /* clear_all_soft_refs */,
                       size    /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true    /* full */,
                       true    /* clear_all_soft_refs */,
                       size    /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true); // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

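// Two generations are created: a young generation (DefNew, or ParNew when
// -XX:+UseParNewGC is set) and a mark-sweep-compact old generation.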
void MarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

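// The counter name encodes the young/old collector pair: "Copy:MSC" is the
// serial copying young collector with mark-sweep-compact, "ParNew:MSC" the
// parallel young collector with mark-sweep-compact.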
void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}