/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_COLLECTORPOLICY_HPP
#define SHARE_VM_MEMORY_COLLECTORPOLICY_HPP

#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genRemSet.hpp"
#include "utilities/macros.hpp"

// This class (or, more correctly, its subtypes) is used to define
// global garbage collector attributes. This includes initialization
// of generations and any other shared resources they may need.
//
// In general, all flag adjustment and validation should be
// done in initialize_flags(), which is called prior to
// initialize_size_info().
//
// This class is not fully developed yet. As more collectors
// are added, it is expected that we will come across further
// behavior that requires global attention. The correct place
// to deal with those issues is this class.
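
// As an illustrative sketch only (the policy class shown here is hypothetical
// and not part of this file), a concrete policy is expected to keep its flag
// handling inside initialize_flags(), typically delegating to its superclass
// for the shared adjustments:
//
//   void MyCollectorPolicy::initialize_flags() {
//     GenCollectorPolicy::initialize_flags();  // shared adjustment and validation
//     // ... policy-specific flag adjustment and validation ...
//   }
//
// This way every flag has its final value before initialize_size_info() runs
// (see CollectorPolicy::initialize_all() below for the call order).
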
// Forward declarations.
class GenCollectorPolicy;
class TwoGenerationCollectorPolicy;
class AdaptiveSizePolicy;
#if INCLUDE_ALL_GCS
class ConcurrentMarkSweepPolicy;
class G1CollectorPolicy;
#endif // INCLUDE_ALL_GCS

class GCPolicyCounters;
class MarkSweepPolicy;

class CollectorPolicy : public CHeapObj {
 protected:
  GCPolicyCounters* _gc_policy_counters;

  virtual void initialize_alignments() = 0;
  virtual void initialize_flags();
  virtual void initialize_size_info();

  DEBUG_ONLY(virtual void assert_flags();)
  DEBUG_ONLY(virtual void assert_size_info();)

  size_t _initial_heap_byte_size;
  size_t _max_heap_byte_size;
  size_t _min_heap_byte_size;

  size_t _space_alignment;
  size_t _heap_alignment;

  // Records whether MaxHeapSize was set on the command line; needed because
  // ergonomics may later align or otherwise adjust the flag value.
  bool _max_heap_size_cmdline;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  // Set to true when the policy wants soft refs cleared.
  // Reset to false by the GC after it clears all soft refs.
  bool _should_clear_all_soft_refs;

  // Set to true by the GC if the just-completed GC cleared all
  // soft refs. This is set to true whenever a GC clears all soft refs, and
  // set to false each time the GC returns to the mutator. For example, in the
  // ParallelScavengeHeap case the latter would be done toward the end of
  // mem_allocate() where it returns op.result().
  bool _all_soft_refs_clear;

  CollectorPolicy();

 public:
  virtual void initialize_all() {
    initialize_alignments();
    initialize_flags();
    initialize_size_info();
  }

  // Return the maximum heap alignment that may be imposed by the policy.
  static size_t compute_heap_alignment();

  size_t space_alignment()        { return _space_alignment; }
  size_t heap_alignment()         { return _heap_alignment; }

  size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
  size_t max_heap_byte_size()     { return _max_heap_byte_size; }
  size_t min_heap_byte_size()     { return _min_heap_byte_size; }

  enum Name {
    CollectorPolicyKind,
    TwoGenerationCollectorPolicyKind,
    ConcurrentMarkSweepPolicyKind,
    ASConcurrentMarkSweepPolicyKind,
    G1CollectorPolicyKind
  };

  AdaptiveSizePolicy* size_policy() { return _size_policy; }
  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
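
  // Illustrative sketch only (actual call sites live in the individual
  // collectors; "heap" and "requested" are hypothetical locals): a collection
  // typically combines its caller's request with the policy's pending request,
  // either by reading should_clear_all_soft_refs() or by latching-and-resetting
  // it with use_should_clear_all_soft_refs() below, and reports completion
  // through the ClearedAllSoftRefs helper declared later in this file:
  //
  //   CollectorPolicy* cp = heap->collector_policy();
  //   bool do_clear_all_soft_refs = requested || cp->should_clear_all_soft_refs();
  //   ClearedAllSoftRefs casr(do_clear_all_soft_refs, cp);
  //   // ... perform the collection, clearing soft refs if requested ...
  //   // ~ClearedAllSoftRefs calls cp->cleared_all_soft_refs() when the flag is set.
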
  // Returns the current value of _should_clear_all_soft_refs.
  // _should_clear_all_soft_refs is set to false as a side effect.
  bool use_should_clear_all_soft_refs(bool v);
  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }

  // Called by the GC after soft refs have been cleared to indicate
  // that the request in _should_clear_all_soft_refs has been fulfilled.
  void cleared_all_soft_refs();

  // Identification methods.
  virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
  virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
  virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
#if INCLUDE_ALL_GCS
  virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
  virtual G1CollectorPolicy*            as_g1_policy()                    { return NULL; }
#endif // INCLUDE_ALL_GCS
  // Note that these are not virtual.
  bool is_generation_policy()            { return as_generation_policy() != NULL; }
  bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
  bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
  bool is_g1_policy()                    { return as_g1_policy() != NULL; }
#else // INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return false; }
  bool is_g1_policy()                    { return false; }
#endif // INCLUDE_ALL_GCS

  virtual BarrierSet::Name barrier_set_name() = 0;

  // Create the remembered set (to cover the given reserved region,
  // allowing breaking up into at most "max_covered_regions").
  virtual GenRemSet* create_rem_set(MemRegion reserved,
                                    int max_covered_regions);

  // This method controls how a collector satisfies a request for a block
  // of memory. "gc_overhead_limit_was_exceeded" will be set to true if the
  // adaptive size policy determines that an excessive amount of time is
  // being spent doing collections and that caused a NULL to be returned.
  // If NULL is not returned, "gc_overhead_limit_was_exceeded" has an
  // undefined meaning. (An illustrative caller-side sketch appears further
  // below in this class.)
  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) = 0;

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
  // This method controls how a collector handles a metadata allocation
  // failure.
  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Performance counter support.
  GCPolicyCounters* counters() { return _gc_policy_counters; }
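
  // Caller-side sketch for mem_allocate_work() above (illustrative only; the
  // error handling is simplified and "policy" is a hypothetical local). A NULL
  // result together with a set overhead-limit flag is what callers typically
  // turn into a "GC overhead limit exceeded" out-of-memory condition:
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* result = policy->mem_allocate_work(size, false /* is_tlab */,
  //                                                &gc_overhead_limit_was_exceeded);
  //   if (result == NULL && gc_overhead_limit_was_exceeded) {
  //     // report/throw OutOfMemoryError("GC overhead limit exceeded")
  //   }
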
  // Create the jstat counters for the GC policy. By default, policies
  // don't have associated counters, and we complain if this is invoked.
  virtual void initialize_gc_policy_counters() {
    ShouldNotReachHere();
  }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::CollectorPolicyKind;
  }

  // Returns true if the collector's eden space has a soft end.
  virtual bool has_soft_ended_eden() {
    return false;
  }

  // Do any updates to global flags that are required due to
  // heap initialization changes.
  virtual void post_heap_initialize() = 0;
};

class ClearedAllSoftRefs : public StackObj {
  bool _clear_all_soft_refs;
  CollectorPolicy* _collector_policy;
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs,
                     CollectorPolicy* collector_policy) :
    _clear_all_soft_refs(clear_all_soft_refs),
    _collector_policy(collector_policy) {}

  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _collector_policy->cleared_all_soft_refs();
    }
  }
};

class GenCollectorPolicy : public CollectorPolicy {
 friend class TestGenCollectorPolicy;
 protected:
  size_t _min_gen0_size;
  size_t _initial_gen0_size;
  size_t _max_gen0_size;

  // _gen_alignment and _space_alignment will have the same value most of the
  // time. When using large pages they can differ.
  size_t _gen_alignment;

  GenerationSpec **_generations;

  // Return true if an allocation should be attempted in the older
  // generation if it fails in the younger generation. Return
  // false otherwise.
  virtual bool should_try_older_generation_allocation(size_t word_size) const;

  void initialize_flags();
  void initialize_size_info();

  DEBUG_ONLY(void assert_flags();)
  DEBUG_ONLY(void assert_size_info();)

  // Try to allocate space by expanding the heap.
  virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  // Compute the maximum heap alignment.
  size_t compute_max_alignment();

  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment().
  // (A worked example with illustrative numbers follows this class.)
  size_t scale_by_NewRatio_aligned(size_t base_size);

  // Bound the value by the given maximum minus the min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);

 public:
  GenCollectorPolicy();

  // Accessors
  size_t min_gen0_size()     { return _min_gen0_size; }
  size_t initial_gen0_size() { return _initial_gen0_size; }
  size_t max_gen0_size()     { return _max_gen0_size; }
  size_t gen_alignment()     { return _gen_alignment; }

  virtual int number_of_generations() = 0;

  virtual GenerationSpec **generations() {
    assert(_generations != NULL, "Sanity check");
    return _generations;
  }

  virtual GenCollectorPolicy* as_generation_policy() { return this; }

  virtual void initialize_generations() { }

  virtual void initialize_all() {
    CollectorPolicy::initialize_all();
    initialize_generations();
  }

  size_t young_gen_size_lower_bound();

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);

  // Adaptive size policy
  virtual void initialize_size_policy(size_t init_eden_size,
                                      size_t init_promo_size,
                                      size_t init_survivor_size);

  virtual void post_heap_initialize() {
    assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info");
  }
};
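
// Worked example for GenCollectorPolicy::scale_by_NewRatio_aligned() above
// (the numbers are illustrative only): with NewRatio = 2 and a base_size of
// 96 MB, the scaled result is 96 MB / (2 + 1) = 32 MB, which is then aligned
// as the comment on that method describes.
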
// All of HotSpot's current collectors are subtypes of this
// class. Currently, these collectors all use the same gen[0],
// but have different gen[1] types. If we add another subtype
// of CollectorPolicy, this class should be broken out into
// its own file.

class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 protected:
  size_t _min_gen1_size;
  size_t _initial_gen1_size;
  size_t _max_gen1_size;

  void initialize_flags();
  void initialize_size_info();

  DEBUG_ONLY(void assert_flags();)
  DEBUG_ONLY(void assert_size_info();)

 public:
  TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0),
    _initial_gen1_size(0), _max_gen1_size(0) {}

  // Accessors
  size_t min_gen1_size()     { return _min_gen1_size; }
  size_t initial_gen1_size() { return _initial_gen1_size; }
  size_t max_gen1_size()     { return _max_gen1_size; }

  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

  int number_of_generations()         { return 2; }
  BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }

  // Returns true if gen0 sizes were adjusted.
  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
                         const size_t heap_size);
};

class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 protected:
  void initialize_alignments();
  void initialize_generations();

 public:
  MarkSweepPolicy() {}

  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
};

#endif // SHARE_VM_MEMORY_COLLECTORPOLICY_HPP