duke@435: /* drchase@6680: * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. 
duke@435: * duke@435: */ duke@435: stefank@2314: #include "precompiled.hpp" stefank@2314: #include "gc_implementation/shared/adaptiveSizePolicy.hpp" stefank@2314: #include "gc_implementation/shared/gcPolicyCounters.hpp" stefank@2314: #include "gc_implementation/shared/vmGCOperations.hpp" stefank@2314: #include "memory/cardTableRS.hpp" stefank@2314: #include "memory/collectorPolicy.hpp" stefank@2314: #include "memory/gcLocker.inline.hpp" stefank@2314: #include "memory/genCollectedHeap.hpp" stefank@2314: #include "memory/generationSpec.hpp" stefank@2314: #include "memory/space.hpp" stefank@2314: #include "memory/universe.hpp" stefank@2314: #include "runtime/arguments.hpp" stefank@2314: #include "runtime/globals_extension.hpp" stefank@2314: #include "runtime/handles.inline.hpp" stefank@2314: #include "runtime/java.hpp" stefank@4299: #include "runtime/thread.inline.hpp" stefank@2314: #include "runtime/vmThread.hpp" jprovino@4542: #include "utilities/macros.hpp" jprovino@4542: #if INCLUDE_ALL_GCS stefank@2314: #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" stefank@2314: #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: duke@435: // CollectorPolicy methods. 

// Default-construct a CollectorPolicy. The alignments are deliberately left
// at 0 here; subclasses must set _space_alignment/_heap_alignment before
// initialize_flags() runs (it asserts they are non-zero). The heap size
// fields start from the current values of the InitialHeapSize/MaxHeapSize
// flags and the -Xms value recorded by argument parsing.
CollectorPolicy::CollectorPolicy() :
    _space_alignment(0),
    _heap_alignment(0),
    _initial_heap_byte_size(InitialHeapSize),
    _max_heap_byte_size(MaxHeapSize),
    _min_heap_byte_size(Arguments::min_heap_size()),
    _max_heap_size_cmdline(false),
    _size_policy(NULL),
    _should_clear_all_soft_refs(false),
    _all_soft_refs_clear(false)
{}

#ifdef ASSERT
// Sanity-check the heap sizing flags after initialize_flags() has run:
// ordering (initial <= max) and alignment to _heap_alignment.
void CollectorPolicy::assert_flags() {
  assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
  assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment");
  assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment");
}

// Sanity-check that the flags and the locally cached copies agree, and that
// min <= initial <= max all hold and are heap-aligned, after
// initialize_size_info() has run.
void CollectorPolicy::assert_size_info() {
  assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage");
  assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage");
  assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes");
  assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes");
  assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes");
  assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment");
  assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment");
  assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment");
}
#endif // ASSERT

// Validate and reconcile the heap sizing flags (-Xms/-Xmx et al.):
// reject impossible command-line combinations, enforce minimum sizes,
// align everything to _heap_alignment, and write any adjusted values back
// to the flags via FLAG_SET_ERGO while mirroring them into the policy's
// local fields. Subclass overrides call this first.
void CollectorPolicy::initialize_flags() {
  assert(_space_alignment != 0, "Space alignment not set up properly");
  assert(_heap_alignment != 0, "Heap alignment not set up properly");
  assert(_heap_alignment >= _space_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));
  assert(_heap_alignment % _space_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _heap_alignment, _space_alignment));

  if (FLAG_IS_CMDLINE(MaxHeapSize)) {
    if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
      vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
    }
    if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
      vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
    }
    // Remembered so that later sizing code knows -Xmx was an explicit user
    // request and must not be exceeded.
    _max_heap_size_cmdline = true;
  }

  // Check heap parameter properties
  if (InitialHeapSize < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }

  // User inputs from -Xmx and -Xms must be aligned
  _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
  uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
  uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);

  // Write back to flags if the values changed
  if (aligned_initial_heap_size != InitialHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size);
  }
  if (aligned_max_heap_size != MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size);
  }

  if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
      InitialHeapSize < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  // Resolve InitialHeapSize > MaxHeapSize: whichever of the two was set
  // non-default "wins" and the other is adjusted ergonomically.
  if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize);
  } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
    FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize);
    if (InitialHeapSize < _min_heap_byte_size) {
      _min_heap_byte_size = InitialHeapSize;
    }
  }

  // Keep the local copies in sync with the (possibly rewritten) flags.
  _initial_heap_byte_size = InitialHeapSize;
  _max_heap_byte_size = MaxHeapSize;

  FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));

  DEBUG_ONLY(CollectorPolicy::assert_flags();)
}

// Base-class size-info step: only traces the already-computed heap bounds
// and re-checks invariants; the real work happens in initialize_flags()
// and in the subclass overrides.
void CollectorPolicy::initialize_size_info() {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
      SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
  }

  DEBUG_ONLY(CollectorPolicy::assert_size_info();)
}

// Consume the "clear all SoftRefs" request: returns the current value and
// resets the flag to false so the request applies to exactly one collection.
// NOTE(review): the parameter 'v' is unused; callers' intent should be
// checked before removing it.
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

// Factory for the remembered set covering the whole heap; only the
// card-table implementation exists here.
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  return new CardTableRS(whole_heap, max_covered_regions);
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

// Compute the alignment every heap size must honor: the remembered-set
// constraint, widened (via lcm) to the large page size when large pages
// are in use.
size_t CollectorPolicy::compute_heap_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.

  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
  // is supported.
  // Requirements of any new remembered set implementations must be added here.
  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
    // in presence of large pages we have to make sure that our
    // alignment is large page aware
    alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

// GenCollectorPolicy methods.

// Default-construct; all gen0 sizes and the generation alignment start at 0
// and are filled in by initialize_flags()/initialize_size_info().
GenCollectorPolicy::GenCollectorPolicy() :
    _min_gen0_size(0),
    _initial_gen0_size(0),
    _max_gen0_size(0),
    _gen_alignment(0),
    _generations(NULL)
{}

// Portion of base_size given to the young gen by NewRatio (heap is split
// as 1 part young : NewRatio parts old), rounded down to _gen_alignment.
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
}

// Cap desired_size so at least one generation alignment of room is left
// below maximum_size (i.e. at most maximum_size - _gen_alignment).
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t max_minus = maximum_size - _gen_alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}

// Create the AdaptiveSizePolicy from the initial space sizes and the
// pause/throughput goals (MaxGCPauseMillis converted to seconds).
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::young_gen_size_lower_bound() {
  // The young generation must be aligned and have room for eden + two survivors
  return align_size_up(3 * _space_alignment, _gen_alignment);
}

#ifdef ASSERT
// Young-gen flag invariants after GenCollectorPolicy::initialize_flags().
void GenCollectorPolicy::assert_flags() {
  CollectorPolicy::assert_flags();
  assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size");
  assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
  assert(NewSize % _gen_alignment == 0, "NewSize alignment");
  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment");
}

// Adds the old-gen (OldSize) invariants on top of the young-gen checks.
void TwoGenerationCollectorPolicy::assert_flags() {
  GenCollectorPolicy::assert_flags();
  assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
  assert(OldSize % _gen_alignment == 0, "OldSize alignment");
}

// Young-gen size invariants after GenCollectorPolicy::initialize_size_info().
void GenCollectorPolicy::assert_size_info() {
  CollectorPolicy::assert_size_info();
  // GenCollectorPolicy::initialize_size_info may update the MaxNewSize
  assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
  assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage");
  assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage");
  assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes");
  assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes");
  assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
  assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
  assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
}

// Adds the old-gen size invariants after
// TwoGenerationCollectorPolicy::initialize_size_info().
void TwoGenerationCollectorPolicy::assert_size_info() {
  GenCollectorPolicy::assert_size_info();
  assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage");
  assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes");
  assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes");
  assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
  assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
  assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
}
#endif // ASSERT

// Reconcile the young-generation flags (NewSize/MaxNewSize) with the heap
// flags: grow the heap if it cannot hold two minimal generations, clamp and
// align NewSize/MaxNewSize against each other and against MaxHeapSize,
// writing adjusted values back ergonomically.
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));
  assert(_gen_alignment % _space_alignment == 0,
         err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
                 _gen_alignment, _space_alignment));
  assert(_heap_alignment % _gen_alignment == 0,
         err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
                 _heap_alignment, _gen_alignment));

  // All generational heaps have a youngest gen; handle those flags here

  // Make sure the heap is large enough for two generations
  uintx smallest_new_size = young_gen_size_lower_bound();
  uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
                                           _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
  if (smallest_new_size != NewSize) {
    // Do not use FLAG_SET_ERGO to update NewSize here, since this will override
    // if NewSize was set on the command line or not. This information is needed
    // later when setting the initial and minimum young generation size.
    NewSize = smallest_new_size;
  }
  _initial_gen0_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);

    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
                MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
        _initial_gen0_size = NewSize;
      }
    } else if (MaxNewSize < min_new_size) {
      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_gen0_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
    _max_gen0_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}

// Extends the young-gen flag setup with old-gen (OldSize) handling:
// align OldSize, derive a heap size from OldSize+NewRatio when only OldSize
// was given, and shrink the generations or grow the heap when
// NewSize + OldSize exceeds MaxHeapSize (depending on whether -Xmx was an
// explicit user request).
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  if (!is_size_aligned(OldSize, _gen_alignment)) {
    FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (_max_heap_size_cmdline) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_gen0_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choice of parameters are done before this
// method is called. Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // _space_alignment is used for alignment within a generation.
  // There is additional alignment done down stream for some
  // collectors that sometimes causes unwanted rounding up of
  // generations sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    max_new_size = MaxNewSize;
  } else {
    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (_max_heap_byte_size == _min_heap_byte_size) {
    // The maximum and minimum heap sizes are the same so
    // the generations minimum and initial must be the
    // same as its maximum.
    _min_gen0_size = max_new_size;
    _initial_gen0_size = max_new_size;
    _max_gen0_size = max_new_size;
  } else {
    size_t desired_new_size = 0;
    if (FLAG_IS_CMDLINE(NewSize)) {
      // If NewSize is set on the command line, we must use it as
      // the initial size and it also makes sense to use it as the
      // lower limit.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else if (FLAG_IS_ERGO(NewSize)) {
      // If NewSize is set ergonomically, we should use it as a lower
      // limit, but use NewRatio to calculate the initial size.
      _min_gen0_size = NewSize;
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values. If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    _initial_gen0_size = desired_new_size;
    _max_gen0_size = max_new_size;

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
  }

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
      SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  }

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied. Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments. The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
// Shrink gen0 and/or gen1 so their sum fits inside heap_size (see the
// comment immediately above). Returns true iff *gen0_size_ptr was changed;
// otherwise gen1 absorbs the adjustment.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size) {
  bool result = false;

  if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
    uintx smallest_new_size = young_gen_size_lower_bound();
    if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
        (heap_size >= _min_gen1_size + smallest_new_size)) {
      // Adjust gen0 down to accommodate _min_gen1_size
      *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
      result = true;
    } else {
      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
    }
  }
  return result;
}

// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms. Issue a warning
// but allow the values to pass.

// Derive the old-generation (gen1) min/initial/max sizes from the already
// computed heap and gen0 sizes. If OldSize was not given on the command
// line, gen1 simply gets what is left of the heap; otherwise OldSize wins
// and gen0 is adjusted (with warnings) to stay consistent. Final values are
// clamped to min <= initial <= max and written back to the flags.
void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);

  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (!FLAG_IS_CMDLINE(OldSize)) {
    // The user has not specified any value but the ergonomics
    // may have chosen a value (which may or may not be consistent
    // with the overall heap size). In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
    _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
    // _max_gen1_size has already been made consistent above
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  } else {
    // It's been explicitly set on the command line. Use the
    // OldSize and then determine the consequences.
    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
    _initial_gen1_size = OldSize;

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one generation alignment.
    if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              _min_heap_byte_size);
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              _max_heap_byte_size);
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          _initial_heap_byte_size)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
          SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
      }
    }
  }
  // Enforce the maximum gen1 size.
  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);

  // Check that min gen1 <= initial gen1 <= max gen1
  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);

  // Write back to flags if necessary
  if (NewSize != _initial_gen0_size) {
    FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
  }

  if (MaxNewSize != _max_gen0_size) {
    FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
  }

  if (OldSize != _initial_gen1_size) {
    FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
      SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  }

  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
mgerdin@4853: for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { duke@435: HandleMark hm; // discard any handles allocated in each iteration duke@435: duke@435: // First allocation attempt is lock-free. duke@435: Generation *gen0 = gch->get_gen(0); duke@435: assert(gen0->supports_inline_contig_alloc(), duke@435: "Otherwise, must do alloc within heap lock"); duke@435: if (gen0->should_allocate(size, is_tlab)) { duke@435: result = gen0->par_allocate(size, is_tlab); duke@435: if (result != NULL) { duke@435: assert(gch->is_in_reserved(result), "result not in heap"); duke@435: return result; duke@435: } duke@435: } duke@435: unsigned int gc_count_before; // read inside the Heap_lock locked region duke@435: { duke@435: MutexLocker ml(Heap_lock); duke@435: if (PrintGC && Verbose) { duke@435: gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:" duke@435: " attempting locked slow path allocation"); duke@435: } duke@435: // Note that only large objects get a shot at being duke@435: // allocated in later generations. duke@435: bool first_only = ! 
should_try_older_generation_allocation(size); duke@435: duke@435: result = gch->attempt_allocation(size, is_tlab, first_only); duke@435: if (result != NULL) { duke@435: assert(gch->is_in_reserved(result), "result not in heap"); duke@435: return result; duke@435: } duke@435: duke@435: if (GC_locker::is_active_and_needs_gc()) { duke@435: if (is_tlab) { duke@435: return NULL; // Caller will retry allocating individual object duke@435: } duke@435: if (!gch->is_maximal_no_gc()) { duke@435: // Try and expand heap to satisfy request duke@435: result = expand_heap_and_allocate(size, is_tlab); duke@435: // result could be null if we are out of space duke@435: if (result != NULL) { duke@435: return result; duke@435: } duke@435: } duke@435: mgerdin@4853: if (gclocker_stalled_count > GCLockerRetryAllocationCount) { mgerdin@4853: return NULL; // we didn't get to do a GC and we didn't get any memory mgerdin@4853: } mgerdin@4853: duke@435: // If this thread is not in a jni critical section, we stall duke@435: // the requestor until the critical section has cleared and duke@435: // GC allowed. When the critical section clears, a GC is duke@435: // initiated by the last thread exiting the critical section; so duke@435: // we retry the allocation sequence from the beginning of the loop, duke@435: // rather than causing more, now probably unnecessary, GC attempts. duke@435: JavaThread* jthr = JavaThread::current(); duke@435: if (!jthr->in_critical()) { duke@435: MutexUnlocker mul(Heap_lock); duke@435: // Wait for JNI critical section to be exited duke@435: GC_locker::stall_until_clear(); mgerdin@4853: gclocker_stalled_count += 1; duke@435: continue; duke@435: } else { duke@435: if (CheckJNICalls) { duke@435: fatal("Possible deadlock due to allocating while" duke@435: " in jni critical section"); duke@435: } duke@435: return NULL; duke@435: } duke@435: } duke@435: duke@435: // Read the gc count while the heap lock is held. 
duke@435: gc_count_before = Universe::heap()->total_collections(); duke@435: } duke@435: jwilhelm@6084: VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); duke@435: VMThread::execute(&op); duke@435: if (op.prologue_succeeded()) { duke@435: result = op.result(); duke@435: if (op.gc_locked()) { duke@435: assert(result == NULL, "must be NULL if gc_locked() is true"); duke@435: continue; // retry and/or stall as necessary duke@435: } jmasa@1822: jmasa@1822: // Allocation has failed and a collection jmasa@1822: // has been done. If the gc time limit was exceeded the jmasa@1822: // this time, return NULL so that an out-of-memory jmasa@1822: // will be thrown. Clear gc_overhead_limit_exceeded jmasa@1822: // so that the overhead exceeded does not persist. jmasa@1822: jmasa@1822: const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); jmasa@1822: const bool softrefs_clear = all_soft_refs_clear(); jmasa@4743: jmasa@1822: if (limit_exceeded && softrefs_clear) { jmasa@1822: *gc_overhead_limit_was_exceeded = true; jmasa@1822: size_policy()->set_gc_overhead_limit_exceeded(false); jmasa@1822: if (op.result() != NULL) { jmasa@1822: CollectedHeap::fill_with_object(op.result(), size); jmasa@1822: } jmasa@1822: return NULL; jmasa@1822: } duke@435: assert(result == NULL || gch->is_in_reserved(result), duke@435: "result not in heap"); duke@435: return result; duke@435: } duke@435: duke@435: // Give a warning if we seem to be looping forever. duke@435: if ((QueuedAllocationWarningCount > 0) && duke@435: (try_count % QueuedAllocationWarningCount == 0)) { duke@435: warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t" drchase@6680: " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? 
"(TLAB)" : ""); duke@435: } duke@435: } duke@435: } duke@435: duke@435: HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size, duke@435: bool is_tlab) { duke@435: GenCollectedHeap *gch = GenCollectedHeap::heap(); duke@435: HeapWord* result = NULL; duke@435: for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) { duke@435: Generation *gen = gch->get_gen(i); duke@435: if (gen->should_allocate(size, is_tlab)) { duke@435: result = gen->expand_and_allocate(size, is_tlab); duke@435: } duke@435: } duke@435: assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); duke@435: return result; duke@435: } duke@435: duke@435: HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size, duke@435: bool is_tlab) { duke@435: GenCollectedHeap *gch = GenCollectedHeap::heap(); duke@435: GCCauseSetter x(gch, GCCause::_allocation_failure); duke@435: HeapWord* result = NULL; duke@435: duke@435: assert(size != 0, "Precondition violated"); duke@435: if (GC_locker::is_active_and_needs_gc()) { duke@435: // GC locker is active; instead of a collection we will attempt duke@435: // to expand the heap, if there's room for expansion. duke@435: if (!gch->is_maximal_no_gc()) { duke@435: result = expand_heap_and_allocate(size, is_tlab); duke@435: } duke@435: return result; // could be null if we are out of space ysr@2336: } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { duke@435: // Do an incremental collection. 
duke@435: gch->do_collection(false /* full */, duke@435: false /* clear_all_soft_refs */, duke@435: size /* size */, duke@435: is_tlab /* is_tlab */, duke@435: number_of_generations() - 1 /* max_level */); duke@435: } else { ysr@2336: if (Verbose && PrintGCDetails) { ysr@2336: gclog_or_tty->print(" :: Trying full because partial may fail :: "); ysr@2336: } duke@435: // Try a full collection; see delta for bug id 6266275 duke@435: // for the original code and why this has been simplified duke@435: // with from-space allocation criteria modified and duke@435: // such allocation moved out of the safepoint path. duke@435: gch->do_collection(true /* full */, duke@435: false /* clear_all_soft_refs */, duke@435: size /* size */, duke@435: is_tlab /* is_tlab */, duke@435: number_of_generations() - 1 /* max_level */); duke@435: } duke@435: duke@435: result = gch->attempt_allocation(size, is_tlab, false /*first_only*/); duke@435: duke@435: if (result != NULL) { duke@435: assert(gch->is_in_reserved(result), "result not in heap"); duke@435: return result; duke@435: } duke@435: duke@435: // OK, collection failed, try expansion. duke@435: result = expand_heap_and_allocate(size, is_tlab); duke@435: if (result != NULL) { duke@435: return result; duke@435: } duke@435: duke@435: // If we reach this point, we're really out of memory. Try every trick duke@435: // we can to reclaim memory. Force collection of soft references. Force duke@435: // a complete compaction of the heap. Any additional methods for finding duke@435: // free memory should be here, especially if they are expensive. If this duke@435: // attempt fails, an OOM exception will be thrown. 
duke@435: { tschatzl@5119: UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted duke@435: duke@435: gch->do_collection(true /* full */, duke@435: true /* clear_all_soft_refs */, duke@435: size /* size */, duke@435: is_tlab /* is_tlab */, duke@435: number_of_generations() - 1 /* max_level */); duke@435: } duke@435: duke@435: result = gch->attempt_allocation(size, is_tlab, false /* first_only */); duke@435: if (result != NULL) { duke@435: assert(gch->is_in_reserved(result), "result not in heap"); duke@435: return result; duke@435: } duke@435: jmasa@1822: assert(!should_clear_all_soft_refs(), jmasa@1822: "Flag should have been handled and cleared prior to this point"); jmasa@1822: duke@435: // What else? We might try synchronous finalization later. If the total duke@435: // space available is large enough for the allocation, then a more duke@435: // complete compaction phase than we've tried so far might be duke@435: // appropriate. duke@435: return NULL; duke@435: } duke@435: coleenp@4037: MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation( coleenp@4037: ClassLoaderData* loader_data, coleenp@4037: size_t word_size, coleenp@4037: Metaspace::MetadataType mdtype) { coleenp@4037: uint loop_count = 0; coleenp@4037: uint gc_count = 0; coleenp@4037: uint full_gc_count = 0; coleenp@4037: jmasa@4234: assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock"); jmasa@4234: coleenp@4037: do { jmasa@4064: MetaWord* result = NULL; jmasa@4064: if (GC_locker::is_active_and_needs_gc()) { jmasa@4064: // If the GC_locker is active, just expand and allocate. jmasa@4064: // If that does not succeed, wait if this thread is not jmasa@4064: // in a critical section itself. 
jmasa@4064: result = jmasa@4064: loader_data->metaspace_non_null()->expand_and_allocate(word_size, jmasa@4064: mdtype); jmasa@4064: if (result != NULL) { jmasa@4064: return result; jmasa@4064: } jmasa@4064: JavaThread* jthr = JavaThread::current(); jmasa@4064: if (!jthr->in_critical()) { jmasa@4064: // Wait for JNI critical section to be exited jmasa@4064: GC_locker::stall_until_clear(); jmasa@4064: // The GC invoked by the last thread leaving the critical jmasa@4064: // section will be a young collection and a full collection jmasa@4064: // is (currently) needed for unloading classes so continue jmasa@4064: // to the next iteration to get a full GC. jmasa@4064: continue; jmasa@4064: } else { jmasa@4064: if (CheckJNICalls) { jmasa@4064: fatal("Possible deadlock due to allocating while" jmasa@4064: " in jni critical section"); jmasa@4064: } jmasa@4064: return NULL; jmasa@4064: } jmasa@4064: } jmasa@4064: coleenp@4037: { // Need lock to get self consistent gc_count's coleenp@4037: MutexLocker ml(Heap_lock); coleenp@4037: gc_count = Universe::heap()->total_collections(); coleenp@4037: full_gc_count = Universe::heap()->total_full_collections(); coleenp@4037: } coleenp@4037: coleenp@4037: // Generate a VM operation coleenp@4037: VM_CollectForMetadataAllocation op(loader_data, coleenp@4037: word_size, coleenp@4037: mdtype, coleenp@4037: gc_count, coleenp@4037: full_gc_count, coleenp@4037: GCCause::_metadata_GC_threshold); coleenp@4037: VMThread::execute(&op); jmasa@4382: jmasa@4382: // If GC was locked out, try again. Check jmasa@4382: // before checking success because the prologue jmasa@4382: // could have succeeded and the GC still have jmasa@4382: // been locked out. 
jmasa@4382: if (op.gc_locked()) { jmasa@4382: continue; jmasa@4382: } jmasa@4382: coleenp@4037: if (op.prologue_succeeded()) { coleenp@4037: return op.result(); coleenp@4037: } coleenp@4037: loop_count++; coleenp@4037: if ((QueuedAllocationWarningCount > 0) && coleenp@4037: (loop_count % QueuedAllocationWarningCount == 0)) { coleenp@4037: warning("satisfy_failed_metadata_allocation() retries %d times \n\t" drchase@6680: " size=" SIZE_FORMAT, loop_count, word_size); coleenp@4037: } coleenp@4037: } while (true); // Until a GC is done coleenp@4037: } coleenp@4037: duke@435: // Return true if any of the following is true: duke@435: // . the allocation won't fit into the current young gen heap duke@435: // . gc locker is occupied (jni critical section) duke@435: // . heap memory is tight -- the most recent previous collection duke@435: // was a full collection because a partial collection (would duke@435: // have) failed and is likely to fail again duke@435: bool GenCollectorPolicy::should_try_older_generation_allocation( duke@435: size_t word_size) const { duke@435: GenCollectedHeap* gch = GenCollectedHeap::heap(); duke@435: size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc(); duke@435: return (word_size > heap_word_size(gen0_capacity)) ysr@2243: || GC_locker::is_active_and_needs_gc() ysr@2243: || gch->incremental_collection_failed(); duke@435: } duke@435: duke@435: duke@435: // duke@435: // MarkSweepPolicy methods duke@435: // duke@435: jwilhelm@6085: void MarkSweepPolicy::initialize_alignments() { jwilhelm@6085: _space_alignment = _gen_alignment = (uintx)Generation::GenGrain; jwilhelm@6085: _heap_alignment = compute_heap_alignment(); duke@435: } duke@435: duke@435: void MarkSweepPolicy::initialize_generations() { zgu@7074: _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC, zgu@7074: AllocFailStrategy::RETURN_NULL); jwilhelm@6084: if (_generations == NULL) { duke@435: vm_exit_during_initialization("Unable to 
allocate gen spec"); jwilhelm@6084: } duke@435: brutisso@4387: if (UseParNewGC) { duke@435: _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size); duke@435: } else { duke@435: _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size); duke@435: } duke@435: _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size); duke@435: jwilhelm@6084: if (_generations[0] == NULL || _generations[1] == NULL) { duke@435: vm_exit_during_initialization("Unable to allocate gen spec"); jwilhelm@6084: } duke@435: } duke@435: duke@435: void MarkSweepPolicy::initialize_gc_policy_counters() { duke@435: // initialize the policy counters - 2 collectors, 3 generations brutisso@4387: if (UseParNewGC) { duke@435: _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3); brutisso@4387: } else { duke@435: _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3); duke@435: } duke@435: } sjohanss@6641: sjohanss@6641: /////////////// Unit tests /////////////// sjohanss@6641: sjohanss@6641: #ifndef PRODUCT sjohanss@6641: // Testing that the NewSize flag is handled correct is hard because it sjohanss@6641: // depends on so many other configurable variables. This test only tries to sjohanss@6641: // verify that there are some basic rules for NewSize honored by the policies. sjohanss@6641: class TestGenCollectorPolicy { sjohanss@6641: public: sjohanss@6641: static void test() { sjohanss@6641: size_t flag_value; sjohanss@6641: sjohanss@6641: save_flags(); sjohanss@6641: sjohanss@6641: // Set some limits that makes the math simple. sjohanss@6641: FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M); sjohanss@6641: FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M); sjohanss@6641: Arguments::set_min_heap_size(40 * M); sjohanss@6641: sjohanss@6641: // If NewSize is set on the command line, it should be used sjohanss@6641: // for both min and initial young size if less than min heap. 
sjohanss@6641: flag_value = 20 * M; sjohanss@6641: FLAG_SET_CMDLINE(uintx, NewSize, flag_value); sjohanss@6641: verify_min(flag_value); sjohanss@6641: verify_initial(flag_value); sjohanss@6641: sjohanss@6641: // If NewSize is set on command line, but is larger than the min sjohanss@6641: // heap size, it should only be used for initial young size. sjohanss@6641: flag_value = 80 * M; sjohanss@6641: FLAG_SET_CMDLINE(uintx, NewSize, flag_value); sjohanss@6641: verify_initial(flag_value); sjohanss@6641: sjohanss@6641: // If NewSize has been ergonomically set, the collector policy sjohanss@6641: // should use it for min but calculate the initial young size sjohanss@6641: // using NewRatio. sjohanss@6641: flag_value = 20 * M; sjohanss@6641: FLAG_SET_ERGO(uintx, NewSize, flag_value); sjohanss@6641: verify_min(flag_value); sjohanss@6641: verify_scaled_initial(InitialHeapSize); sjohanss@6641: sjohanss@6641: restore_flags(); sjohanss@6641: sjohanss@6641: } sjohanss@6641: sjohanss@6641: static void verify_min(size_t expected) { sjohanss@6641: MarkSweepPolicy msp; sjohanss@6641: msp.initialize_all(); sjohanss@6641: sjohanss@6641: assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected)); sjohanss@6641: } sjohanss@6641: sjohanss@6641: static void verify_initial(size_t expected) { sjohanss@6641: MarkSweepPolicy msp; sjohanss@6641: msp.initialize_all(); sjohanss@6641: sjohanss@6641: assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected)); sjohanss@6641: } sjohanss@6641: sjohanss@6641: static void verify_scaled_initial(size_t initial_heap_size) { sjohanss@6641: MarkSweepPolicy msp; sjohanss@6641: msp.initialize_all(); sjohanss@6641: sjohanss@6641: size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size); sjohanss@6641: assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected)); sjohanss@6641: assert(FLAG_IS_ERGO(NewSize) && NewSize == expected, 
sjohanss@6641: err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize)); sjohanss@6641: } sjohanss@6641: sjohanss@6641: private: sjohanss@6641: static size_t original_InitialHeapSize; sjohanss@6641: static size_t original_MaxHeapSize; sjohanss@6641: static size_t original_MaxNewSize; sjohanss@6641: static size_t original_MinHeapDeltaBytes; sjohanss@6641: static size_t original_NewSize; sjohanss@6641: static size_t original_OldSize; sjohanss@6641: sjohanss@6641: static void save_flags() { sjohanss@6641: original_InitialHeapSize = InitialHeapSize; sjohanss@6641: original_MaxHeapSize = MaxHeapSize; sjohanss@6641: original_MaxNewSize = MaxNewSize; sjohanss@6641: original_MinHeapDeltaBytes = MinHeapDeltaBytes; sjohanss@6641: original_NewSize = NewSize; sjohanss@6641: original_OldSize = OldSize; sjohanss@6641: } sjohanss@6641: sjohanss@6641: static void restore_flags() { sjohanss@6641: InitialHeapSize = original_InitialHeapSize; sjohanss@6641: MaxHeapSize = original_MaxHeapSize; sjohanss@6641: MaxNewSize = original_MaxNewSize; sjohanss@6641: MinHeapDeltaBytes = original_MinHeapDeltaBytes; sjohanss@6641: NewSize = original_NewSize; sjohanss@6641: OldSize = original_OldSize; sjohanss@6641: } sjohanss@6641: }; sjohanss@6641: sjohanss@6641: size_t TestGenCollectorPolicy::original_InitialHeapSize = 0; sjohanss@6641: size_t TestGenCollectorPolicy::original_MaxHeapSize = 0; sjohanss@6641: size_t TestGenCollectorPolicy::original_MaxNewSize = 0; sjohanss@6641: size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0; sjohanss@6641: size_t TestGenCollectorPolicy::original_NewSize = 0; sjohanss@6641: size_t TestGenCollectorPolicy::original_OldSize = 0; sjohanss@6641: sjohanss@6641: void TestNewSize_test() { sjohanss@6641: TestGenCollectorPolicy::test(); sjohanss@6641: } sjohanss@6641: sjohanss@6641: #endif