Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
/*
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectorPolicy.cpp.incl"

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
  MaxPermSize = align_size_up(MaxPermSize, max_alignment());

  MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
  MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  assert(PermSize % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
}
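
// Illustrative sizing for initialize_flags() above, with assumed alignments
// (min_alignment() == 64K, max_alignment() == 4M; the actual values are
// platform- and collector-dependent): a PermSize of 16M + 1K is rounded
// down to 16M, while a MaxPermSize of 65M is rounded up to 68M.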

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
  _permanent_generation =
    new PermanentGenerationSpec(pgnm, PermSize, MaxPermSize,
                                SharedReadOnlySize,
                                SharedReadWriteSize,
                                SharedMiscDataSize,
                                SharedMiscCodeSize);
  if (_permanent_generation == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection, but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                        align_size_down(x, min_alignment()) :
                        min_alignment();
  return new_gen_size;
}
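
// Illustrative arithmetic for scale_by_NewRatio_aligned() (values assumed):
// with NewRatio == 2 and base_size == 96M, x == 96M / 3 == 32M, which is
// already a multiple of a typical min_alignment() and is returned as-is.
// A very small base_size bottoms out at min_alignment().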

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}
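
// Illustrative use of bound_minus_alignment() (values assumed): bounding a
// desired gen0 size of 32M against a 32M heap with min_alignment() == 64K
// yields 32M - 64K, leaving at least one alignment unit of room for the
// other generation.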

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // In the presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}
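
// Illustrative result for compute_max_alignment() (values assumed): with a
// card-table constraint of 2M (the 512*4096 case above) and 256M large
// pages enabled for a non-ParallelGC collector, lcm(256M, 2M) == 256M
// becomes the maximum alignment.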

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;
  BlockOffsetArrayUseUnallocatedBlock =
      BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values explicitly set on the command line win over any ergonomically
// chosen parameters. Ergonomic choices are made before this method is
// called; values for command line parameters such as NewSize and
// MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with the overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
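// Illustrative precedence (flag values assumed): if ergonomics chose a
// MaxNewSize but the user also passed -XX:MaxNewSize=64m on the command
// line, then FLAG_IS_CMDLINE(MaxNewSize) is true below and the 64m value
// is the one consulted; the ergonomic choice is ignored.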
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    } else if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k). A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size. For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap. The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap. This allows the
    // different collectors to decide what's best for themselves
    // without having to factor in the overall heap shape. It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices. The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");
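  // Illustrative bound (values assumed): if scale_by_NewRatio_aligned()
  // returned 10M while NewSize == 16M and MaxNewSize == 64M, the
  // MIN2(MAX2(...)) above raises max_new_size to 16M; a scaled value of
  // 80M would instead be clamped down to 64M.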

  // Given the maximum gen0 size, determine the initial and
  // minimum sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it. If it is used, also use it
      // to set the initial size. Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
        (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}
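
// Illustrative adjustment (values assumed): with heap_size == 64M,
// *gen0_size_ptr == 48M, *gen1_size_ptr == 32M, and OldSize == 32M passed
// as min_gen0_size, the generations oversubscribe the heap (80M > 64M) and
// gen0 alone cannot coexist with OldSize, so gen0 is cut to
// 64M - 32M == 32M (aligned down) and the method returns true.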

// The minimum sizes of the generations may differ from
// the initial sizes. An inconsistency is permitted here
// between the total size specified explicitly via the
// command line flags OldSize and NewSize and a command line
// specification of -Xms. Issue a warning but allow the
// values to pass.
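
// Illustrative inconsistency (flag values assumed): with -XX:NewSize=8m
// -XX:OldSize=8m -Xms64m, the generation minimums plus one alignment unit
// (8M + 8M + min_alignment()) fall short of the 64M minimum heap, so the
// warning below fires and the minimum heap of 64M is used.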

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // error will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;  // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail()) {
    // The gc_prologues have not executed yet. The value
    // for incremental_collection_will_fail() is the remnant
    // of the last collection.
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

size_t GenCollectorPolicy::large_typearray_limit() {
  return FastAllocateSizeLimit;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || (GC_locker::is_active_and_needs_gc())
         || (   gch->last_incremental_collection_failed()
             && gch->incremental_collection_will_fail());
}
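
// Illustrative case (sizes assumed): a request for a 100M object against a
// 64M young generation exceeds gen0's capacity, so the predicate above
// returns true and the slow path may attempt the allocation directly in an
// older generation.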

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  initialize_perm_generation(PermGen::MarkSweepCompact);
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC && ParallelGCThreads > 0) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC && ParallelGCThreads > 0) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}