Mon, 21 Oct 2013 18:52:13 +0200
8026852: Use restricted_align_down in collector policy code
Summary: Moved restricted_align_down to globalDefinitions and renamed it align_size_down_bounded
Reviewed-by: stefank, mgerdin, tschatzl
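
For context, a minimal sketch of the renamed helper described in the summary, assuming it is placed next to the other alignment helpers in utilities/globalDefinitions.hpp and reuses the existing align_size_down_ macro (the exact body is an assumption, not part of this file):

  inline size_t align_size_down_bounded(size_t size, size_t alignment) {
    // Align down, but never return less than one alignment unit.
    size_t aligned_size = align_size_down_(size, alignment);
    return aligned_size > 0 ? aligned_size : alignment;
  }

Something of this shape is what lets scale_by_NewRatio_aligned() and adjust_gen0_sizes() below rely on a non-zero, aligned result even for very small inputs.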
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  assert(_max_alignment >= _min_alignment,
         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
                 _max_alignment, _min_alignment));
  assert(_max_alignment % _min_alignment == 0,
         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
                 _max_alignment, _min_alignment));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);

  // Check heap parameter properties
  if (_initial_heap_byte_size < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (_initial_heap_byte_size <= NewSize) {
    // make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (_max_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (_initial_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (_max_heap_byte_size < _initial_heap_byte_size) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
                           SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
                           _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
  }
}

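// Returns the current value of _should_clear_all_soft_refs and resets the
// flag to false so that it only applies to one collection request. Note that
// the boolean parameter is not used.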
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  return new CardTableRS(whole_heap, max_covered_regions);
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

size_t CollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.

  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
  // is supported.
  // Requirements of any new remembered set implementations must be added here.
  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

// GenCollectorPolicy methods.

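// Scales base_size by 1/(NewRatio + 1) and aligns the result down, but never
// below _min_alignment, via align_size_down_bounded().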
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  return align_size_down_bounded(base_size / (NewRatio + 1), _min_alignment);
}

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = _min_alignment;
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  _min_alignment = (uintx) Generation::GenGrain;
  _max_alignment = compute_max_alignment();

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, _min_alignment);
  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);

  // Check validity of heap flags
  assert(NewSize % _min_alignment == 0, "eden space alignment");
  assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");

  if (NewSize < 3 * _min_alignment) {
    // make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, _min_alignment);

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _max_alignment, and we just made sure that NewSize is aligned to
      // _min_alignment. In initialize_flags() we verified that _max_alignment
      // is a multiple of _min_alignment.
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % _min_alignment == 0, "old space alignment");
  assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called. Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // _min_alignment is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < _min_alignment) {
      max_new_size = _min_alignment;
    }
    if (MaxNewSize >= _max_heap_byte_size) {
      max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
                                     _min_alignment);
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k). A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, _min_alignment);
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size. For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap. The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap. This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape. It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices. The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (_max_heap_byte_size == _min_heap_byte_size) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    _min_gen0_size = max_new_size;
    _initial_gen0_size = max_new_size;
    _max_gen0_size = max_new_size;
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it. If it is used, also use it
      // to set the initial size. Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    _initial_gen0_size = desired_new_size;
    _max_gen0_size = max_new_size;

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
                           SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
                           _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + _min_alignment)) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = align_size_down_bounded(heap_size - min_gen1_size, _min_alignment);
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _min_alignment);
    }
  }
  return result;
}

// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(_min_heap_byte_size > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
    _min_gen1_size =
      MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
    _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
    _initial_gen1_size =
      MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    _min_gen1_size = OldSize;
    _initial_gen1_size = OldSize;

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              _min_heap_byte_size);
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              _max_heap_byte_size);
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          _min_heap_byte_size, OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
                               SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
                               _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          _initial_heap_byte_size, OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
                               SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
                               _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
  }
  // Enforce the maximum gen1 size.
  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);

  // Check that min gen1 <= initial gen1 <= max gen1
  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
                           SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
                           _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  }
}

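// Slow-path allocation: loop attempting a lock-free allocation in gen0,
// then a locked allocation in the heap, stalling on the GC locker or
// triggering a collection via a VM operation as needed until the request
// is satisfied or NULL is returned.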
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // it is set to false here and only reset to true if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

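// Try to satisfy the allocation by expanding a generation, starting with the
// oldest generation that accepts an allocation of this size.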
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

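// Last-ditch allocation path taken after the normal allocation attempts have
// failed: collect (incrementally or fully), expand the heap, and finally
// force a fully compacting collection that clears soft references before
// giving up.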
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;  // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false /* full */,
                       false /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true /* full */,
                       false /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true /* full */,
                       true /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

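// Attempt a metadata allocation after a failure: expand the metaspace when
// the GC locker is active, otherwise trigger a collection via a VM operation
// and retry in a loop.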
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                       ClassLoaderData* loader_data,
                                                       size_t word_size,
                                                       Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return (word_size > heap_word_size(gen0_capacity))
      || GC_locker::is_active_and_needs_gc()
      || gch->incremental_collection_failed();
}

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}