/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_COLLECTORPOLICY_HPP
#define SHARE_VM_MEMORY_COLLECTORPOLICY_HPP

#include "memory/barrierSet.hpp"
#include "memory/genRemSet.hpp"
#include "memory/permGen.hpp"

// This class (or more correctly, subtypes of this class)
// is used to define global garbage collector attributes.
// This includes initialization of generations and any other
// shared resources they may need.
//
// In general, all flag adjustment and validation should be
// done in initialize_flags(), which is called prior to
// initialize_size_info().
//
// This class is not fully developed yet.  As more collectors
// are added, it is expected that we will come across further
// behavior that requires global attention.  The correct place
// to deal with those issues is this class.

// Forward declarations.
class GenCollectorPolicy;
class TwoGenerationCollectorPolicy;
class AdaptiveSizePolicy;
#ifndef SERIALGC
class ConcurrentMarkSweepPolicy;
class G1CollectorPolicy;
#endif // SERIALGC

class GCPolicyCounters;
class PermanentGenerationSpec;
class MarkSweepPolicy;

class CollectorPolicy : public CHeapObj {
 protected:
  PermanentGenerationSpec *_permanent_generation;
  GCPolicyCounters* _gc_policy_counters;

  // Requires that the concrete subclass sets the alignment constraints
  // before calling.
  virtual void initialize_flags();
  virtual void initialize_size_info();
  // Initialize "_permanent_generation" to a spec for the given kind of
  // Perm Gen.
  void initialize_perm_generation(PermGen::Name pgnm);

  size_t _initial_heap_byte_size;
  size_t _max_heap_byte_size;
  size_t _min_heap_byte_size;

  size_t _min_alignment;
  size_t _max_alignment;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  // Set to true when policy wants soft refs cleared.
  // Reset to false by gc after it clears all soft refs.
  bool _should_clear_all_soft_refs;
  // Set to true by the GC if the just-completed gc cleared all
  // softrefs.  This is set to true whenever a gc clears all softrefs, and
  // set to false each time gc returns to the mutator.  For example, in the
  // ParallelScavengeHeap case the latter would be done toward the end of
  // mem_allocate() where it returns op.result()
  bool _all_soft_refs_clear;

  CollectorPolicy() :
    _min_alignment(1),
    _max_alignment(1),
    _initial_heap_byte_size(0),
    _max_heap_byte_size(0),
    _min_heap_byte_size(0),
    _size_policy(NULL),
    _should_clear_all_soft_refs(false),
    _all_soft_refs_clear(false)
  {}

 public:
  void set_min_alignment(size_t align) { _min_alignment = align; }
  size_t min_alignment()               { return _min_alignment; }
  void set_max_alignment(size_t align) { _max_alignment = align; }
  size_t max_alignment()               { return _max_alignment; }

  size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
  size_t max_heap_byte_size()     { return _max_heap_byte_size; }
  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
  size_t min_heap_byte_size()     { return _min_heap_byte_size; }
  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }

  enum Name {
    CollectorPolicyKind,
    TwoGenerationCollectorPolicyKind,
    ConcurrentMarkSweepPolicyKind,
    ASConcurrentMarkSweepPolicyKind,
    G1CollectorPolicyKind
  };

  AdaptiveSizePolicy* size_policy() { return _size_policy; }
  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
  // Returns the current value of _should_clear_all_soft_refs.
  // _should_clear_all_soft_refs is set to false as a side effect.
  bool use_should_clear_all_soft_refs(bool v);
  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }

  // Called by the GC after Soft Refs have been cleared to indicate
  // that the request in _should_clear_all_soft_refs has been fulfilled.
  void cleared_all_soft_refs();
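
  // Illustrative sketch of the soft-reference clearing handshake (not a
  // prescribed sequence; "explicit_request" below is a placeholder for a
  // caller's own clear-all flag): a failing allocation path may request
  // aggressive clearing via set_should_clear_all_soft_refs(true); the next
  // collection then typically does roughly
  //
  //   bool clear_all = explicit_request ||
  //                    collector_policy()->should_clear_all_soft_refs();
  //   ClearedAllSoftRefs casr(clear_all, collector_policy());  // declared below
  //   // ... collect, clearing soft refs if clear_all ...
  //
  // so that cleared_all_soft_refs() runs when the scope exits and the
  // request recorded in _should_clear_all_soft_refs is acknowledged.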

  // Identification methods.
  virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
  virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
  virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
#ifndef SERIALGC
  virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
  virtual G1CollectorPolicy*            as_g1_policy()                    { return NULL; }
#endif // SERIALGC
  // Note that these are not virtual.
  bool is_generation_policy()            { return as_generation_policy() != NULL; }
  bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
  bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
#ifndef SERIALGC
  bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
  bool is_g1_policy()                    { return as_g1_policy() != NULL; }
#else  // SERIALGC
  bool is_concurrent_mark_sweep_policy() { return false; }
  bool is_g1_policy()                    { return false; }
#endif // SERIALGC

  virtual PermanentGenerationSpec *permanent_generation() {
    assert(_permanent_generation != NULL, "Sanity check");
    return _permanent_generation;
  }

  virtual BarrierSet::Name barrier_set_name() = 0;
  virtual GenRemSet::Name rem_set_name() = 0;

  // Create the remembered set (to cover the given reserved region,
  // allowing breaking up into at most "max_covered_regions").
  virtual GenRemSet* create_rem_set(MemRegion reserved,
                                    int max_covered_regions);

  // This method controls how a collector satisfies a request
  // for a block of memory.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) = 0;

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
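
  // Note on how these two entry points relate (a description of the typical
  // generational flow, not a fixed contract): mem_allocate_work() is the
  // normal allocation path; when the generations cannot satisfy the request
  // directly, a collection is scheduled and satisfy_failed_allocation() is
  // invoked (at a safepoint) to expand the heap and/or collect, as a last
  // resort clearing all soft references, before the allocation finally fails.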

  // Performance Counter support
  GCPolicyCounters* counters() { return _gc_policy_counters; }

  // Create the jstat counters for the GC policy.  By default, policies
  // don't have associated counters, and we complain if this is invoked.
  virtual void initialize_gc_policy_counters() {
    ShouldNotReachHere();
  }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::CollectorPolicyKind;
  }

  // Returns true if a collector has eden space with soft end.
  virtual bool has_soft_ended_eden() {
    return false;
  }

};

class ClearedAllSoftRefs : public StackObj {
  bool _clear_all_soft_refs;
  CollectorPolicy* _collector_policy;
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs,
                     CollectorPolicy* collector_policy) :
    _clear_all_soft_refs(clear_all_soft_refs),
    _collector_policy(collector_policy) {}

  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _collector_policy->cleared_all_soft_refs();
    }
  }
};

class GenCollectorPolicy : public CollectorPolicy {
 protected:
  size_t _min_gen0_size;
  size_t _initial_gen0_size;
  size_t _max_gen0_size;

  GenerationSpec **_generations;

  // Return true if an allocation should be attempted in the older
  // generation if it fails in the younger generation.  Return
  // false, otherwise.
  virtual bool should_try_older_generation_allocation(size_t word_size) const;

  void initialize_flags();
  void initialize_size_info();

  // Try to allocate space by expanding the heap.
  virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  // Compute the maximum heap alignment.
  size_t compute_max_alignment();

  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment()
  size_t scale_by_NewRatio_aligned(size_t base_size);

  // Bound the value by the given maximum minus the
  // min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);

 public:
  // Accessors
  size_t min_gen0_size() { return _min_gen0_size; }
  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
  size_t initial_gen0_size() { return _initial_gen0_size; }
  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
  size_t max_gen0_size() { return _max_gen0_size; }
  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }

  virtual int number_of_generations() = 0;

  virtual GenerationSpec **generations() {
    assert(_generations != NULL, "Sanity check");
    return _generations;
  }

  virtual GenCollectorPolicy* as_generation_policy() { return this; }

  virtual void initialize_generations() = 0;

  virtual void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_generations();
  }

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);

  // Adaptive size policy
  virtual void initialize_size_policy(size_t init_eden_size,
                                      size_t init_promo_size,
                                      size_t init_survivor_size);
};
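
// Illustrative arithmetic for the gen0 sizing helper above (an example, not
// a normative value): with NewRatio = 2 and a 768M value passed as base_size,
// scale_by_NewRatio_aligned() yields roughly
//
//   768M / (NewRatio + 1) = 768M / 3 = 256M
//
// aligned down to a min_alignment() boundary, i.e. gen0 is sized at about a
// third of the heap and gen1 gets the remaining two thirds.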

// All of hotspot's current collectors are subtypes of this
// class.  Currently, these collectors all use the same gen[0],
// but have different gen[1] types.  If we add another subtype
// of CollectorPolicy, this class should be broken out into
// its own file.

class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 protected:
  size_t _min_gen1_size;
  size_t _initial_gen1_size;
  size_t _max_gen1_size;

  void initialize_flags();
  void initialize_size_info();
  void initialize_generations() { ShouldNotReachHere(); }

 public:
  // Accessors
  size_t min_gen1_size() { return _min_gen1_size; }
  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
  size_t initial_gen1_size() { return _initial_gen1_size; }
  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
  size_t max_gen1_size() { return _max_gen1_size; }
  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }

  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

  int number_of_generations()          { return 2; }
  BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
  GenRemSet::Name rem_set_name()       { return GenRemSet::CardTable; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }

  // Returns true if gen0 sizes were adjusted
  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
                         size_t heap_size, size_t min_gen1_size);
};

class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 protected:
  void initialize_generations();

 public:
  MarkSweepPolicy();

  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
};

#endif // SHARE_VM_MEMORY_COLLECTORPOLICY_HPP