/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
                 max_alignment(), min_alignment()));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase the Metaspace size limit above the specified value.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned.
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in the old space.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}
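// Returns the current value of _should_clear_all_soft_refs and resets the
// flag to false. The bool argument is not used.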
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}


// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                     align_size_down(x, min_alignment()) :
                     min_alignment();
  return new_gen_size;
}
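// Bound desired_size by maximum_size minus one min_alignment() unit, so that
// at least one alignment quantum is left over for the other generation.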
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In the presence of large pages we have to make sure that our
      // alignment is large page aware.
     alignment = lcm(os::large_page_size(), alignment);
  }

  assert(alignment >= min_alignment(), "Must be");

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.
  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // Adjust max heap size if necessary.
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment(). In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // Need to do this again after the adjustment above.
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
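
  // The always-on oop store barrier is only needed when CMS is in use.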
  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine the maximum size of gen0.

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
              "greater than the entire heap (" SIZE_FORMAT "k).  A "
              "new generation size of " SIZE_FORMAT "k will be used.",
              MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
    // specially at this point to just use an ergonomically set
    // MaxNewSize to set max_new_size.  For cases with small
    // heaps such a policy often did not work because the MaxNewSize
    // was larger than the entire heap.  The interpretation given
    // to ergonomically set flags is that the flags are set
    // by different collectors for their own special needs but
    // are not allowed to badly shape the heap.
    // This allows the different collectors to decide what's best for
    // themselves without having to factor in the overall heap shape.  It
    // can be the case in the future that the collectors would
    // only make "wise" ergonomics choices and this policy could
    // just accept those choices.  The choices currently made are
    // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays a role here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                            NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size.
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different from
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
           "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
        min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
              "generation sizes: using minimum heap = " SIZE_FORMAT,
              min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
              "generation sizes: using maximum heap = " SIZE_FORMAT
              " -XX:OldSize flag is being ignored",
              max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1.
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                               " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);
      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try to expand the heap to satisfy the request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for the JNI critical section to be exited.
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded this
      // time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for the JNI critical section to be exited.
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations.
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}