duke@435: /* jcoomes@3541: * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP stefank@2314: #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP stefank@2314: stefank@2314: #include "gc_implementation/parallelScavenge/objectStartArray.hpp" stefank@2314: #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" stefank@2314: #include "gc_implementation/parallelScavenge/psOldGen.hpp" stefank@2314: #include "gc_implementation/parallelScavenge/psYoungGen.hpp" stefank@2314: #include "gc_implementation/shared/gcPolicyCounters.hpp" stefank@2314: #include "gc_interface/collectedHeap.inline.hpp" stefank@2314: #include "utilities/ostream.hpp" stefank@2314: duke@435: class AdjoiningGenerations; duke@435: class GCTaskManager; duke@435: class PSAdaptiveSizePolicy; jmasa@1822: class GenerationSizer; jmasa@1822: class CollectorPolicy; duke@435: duke@435: class ParallelScavengeHeap : public CollectedHeap { duke@435: friend class VMStructs; duke@435: private: duke@435: static PSYoungGen* _young_gen; duke@435: static PSOldGen* _old_gen; duke@435: duke@435: // Sizing policy for entire heap duke@435: static PSAdaptiveSizePolicy* _size_policy; duke@435: static PSGCAdaptivePolicyCounters* _gc_policy_counters; duke@435: duke@435: static ParallelScavengeHeap* _psh; duke@435: duke@435: size_t _young_gen_alignment; duke@435: size_t _old_gen_alignment; duke@435: jmasa@1822: GenerationSizer* _collector_policy; jmasa@1822: duke@435: inline size_t set_alignment(size_t& var, size_t val); duke@435: duke@435: // Collection of generations that are adjacent in the duke@435: // space reserved for the heap. duke@435: AdjoiningGenerations* _gens; jcoomes@3541: unsigned int _death_march_count; duke@435: duke@435: static GCTaskManager* _gc_task_manager; // The task manager. duke@435: duke@435: protected: duke@435: static inline size_t total_invocations(); duke@435: HeapWord* allocate_new_tlab(size_t size); duke@435: jcoomes@3541: inline bool should_alloc_in_eden(size_t size) const; jcoomes@3541: inline void death_march_check(HeapWord* const result, size_t size); jcoomes@3541: HeapWord* mem_allocate_old_gen(size_t size); jcoomes@3541: duke@435: public: duke@435: ParallelScavengeHeap() : CollectedHeap() { jcoomes@3541: _death_march_count = 0; jmasa@448: set_alignment(_young_gen_alignment, intra_heap_alignment()); jmasa@448: set_alignment(_old_gen_alignment, intra_heap_alignment()); duke@435: } duke@435: duke@435: // For use by VM operations duke@435: enum CollectionType { duke@435: Scavenge, duke@435: MarkSweep duke@435: }; duke@435: duke@435: ParallelScavengeHeap::Name kind() const { duke@435: return CollectedHeap::ParallelScavengeHeap; duke@435: } duke@435: coleenp@4037: virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; } jmasa@1822: duke@435: static PSYoungGen* young_gen() { return _young_gen; } duke@435: static PSOldGen* old_gen() { return _old_gen; } duke@435: duke@435: virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; } duke@435: duke@435: static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; } duke@435: duke@435: static ParallelScavengeHeap* heap(); duke@435: duke@435: static GCTaskManager* const gc_task_manager() { return _gc_task_manager; } duke@435: duke@435: AdjoiningGenerations* gens() { return _gens; } duke@435: duke@435: // Returns JNI_OK on success duke@435: virtual jint initialize(); duke@435: duke@435: void post_initialize(); duke@435: void update_counters(); duke@435: // The alignment used for the various generations. duke@435: size_t young_gen_alignment() const { return _young_gen_alignment; } duke@435: size_t old_gen_alignment() const { return _old_gen_alignment; } duke@435: jmasa@448: // The alignment used for eden and survivors within the young gen jmasa@448: // and for boundary between young gen and old gen. jcoomes@5201: size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; } duke@435: duke@435: size_t capacity() const; duke@435: size_t used() const; duke@435: coleenp@4037: // Return "true" if all generations have reached the duke@435: // maximal committed limit that they can reach, without a garbage duke@435: // collection. duke@435: virtual bool is_maximal_no_gc() const; duke@435: jmasa@2909: // Return true if the reference points to an object that jmasa@2909: // can be moved in a partial collection. For currently implemented jmasa@2909: // generational collectors that means during a collection of jmasa@2909: // the young gen. jmasa@2909: virtual bool is_scavengable(const void* addr); jmasa@2909: duke@435: // Does this heap support heap inspection? (+PrintClassHistogram) duke@435: bool supports_heap_inspection() const { return true; } duke@435: duke@435: size_t max_capacity() const; duke@435: duke@435: // Whether p is in the allocated part of the heap duke@435: bool is_in(const void* p) const; duke@435: duke@435: bool is_in_reserved(const void* p) const; duke@435: jmasa@2909: #ifdef ASSERT jmasa@2909: virtual bool is_in_partial_collection(const void *p); jmasa@2909: #endif jmasa@2909: coleenp@4037: bool is_in_young(oop p); // reserved part coleenp@4037: bool is_in_old(oop p); // reserved part duke@435: duke@435: // Memory allocation. "gc_time_limit_was_exceeded" will duke@435: // be set to true if the adaptive size policy determine that duke@435: // an excessive amount of time is being spent doing collections duke@435: // and caused a NULL to be returned. If a NULL is not returned, duke@435: // "gc_time_limit_was_exceeded" has an undefined meaning. tonyp@2971: HeapWord* mem_allocate(size_t size, tonyp@2971: bool* gc_overhead_limit_was_exceeded); duke@435: tonyp@2971: // Allocation attempt(s) during a safepoint. It should never be called tonyp@2971: // to allocate a new TLAB as this allocation might be satisfied out tonyp@2971: // of the old generation. tonyp@2971: HeapWord* failed_mem_allocate(size_t size); duke@435: duke@435: // Support for System.gc() duke@435: void collect(GCCause::Cause cause); duke@435: duke@435: // These also should be called by the vm thread at a safepoint (e.g., from a duke@435: // VM operation). duke@435: // duke@435: // The first collects the young generation only, unless the scavenge fails; it duke@435: // will then attempt a full gc. The second collects the entire heap; if duke@435: // maximum_compaction is true, it will compact everything and clear all soft duke@435: // references. duke@435: inline void invoke_scavenge(); coleenp@4037: coleenp@4037: // Perform a full collection coleenp@4037: virtual void do_full_collection(bool clear_all_soft_refs); duke@435: duke@435: bool supports_inline_contig_alloc() const { return !UseNUMA; } iveresov@576: iveresov@576: HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; } iveresov@576: HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; } duke@435: duke@435: void ensure_parsability(bool retire_tlabs); duke@435: void accumulate_statistics_all_tlabs(); duke@435: void resize_all_tlabs(); duke@435: duke@435: size_t unsafe_max_alloc(); duke@435: duke@435: bool supports_tlab_allocation() const { return true; } duke@435: duke@435: size_t tlab_capacity(Thread* thr) const; duke@435: size_t unsafe_max_tlab_alloc(Thread* thr) const; duke@435: ysr@777: // Can a compiler initialize a new object without store barriers? ysr@777: // This permission only extends from the creation of a new object ysr@777: // via a TLAB up to the first subsequent safepoint. ysr@777: virtual bool can_elide_tlab_store_barriers() const { ysr@777: return true; ysr@777: } ysr@777: ysr@1601: virtual bool card_mark_must_follow_store() const { ysr@1601: return false; ysr@1601: } ysr@1601: ysr@1462: // Return true if we don't we need a store barrier for ysr@1462: // initializing stores to an object at this address. ysr@1462: virtual bool can_elide_initializing_store_barrier(oop new_obj); ysr@1462: coleenp@4037: void oop_iterate(ExtendedOopClosure* cl); duke@435: void object_iterate(ObjectClosure* cl); jmasa@952: void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } duke@435: duke@435: HeapWord* block_start(const void* addr) const; duke@435: size_t block_size(const HeapWord* addr) const; duke@435: bool block_is_obj(const HeapWord* addr) const; duke@435: duke@435: jlong millis_since_last_gc(); duke@435: duke@435: void prepare_for_verify(); tonyp@3269: virtual void print_on(outputStream* st) const; stefank@4904: virtual void print_on_error(outputStream* st) const; duke@435: virtual void print_gc_threads_on(outputStream* st) const; duke@435: virtual void gc_threads_do(ThreadClosure* tc) const; duke@435: virtual void print_tracing_info() const; duke@435: brutisso@3711: void verify(bool silent, VerifyOption option /* ignored */); duke@435: duke@435: void print_heap_change(size_t prev_used); duke@435: duke@435: // Resize the young generation. The reserved space for the duke@435: // generation may be expanded in preparation for the resize. duke@435: void resize_young_gen(size_t eden_size, size_t survivor_size); duke@435: duke@435: // Resize the old generation. The reserved space for the duke@435: // generation may be expanded in preparation for the resize. duke@435: void resize_old_gen(size_t desired_free_space); jmasa@698: jmasa@698: // Save the tops of the spaces in all generations jmasa@698: void record_gen_tops_before_GC() PRODUCT_RETURN; jmasa@698: jmasa@698: // Mangle the unused parts of all spaces in the heap jmasa@698: void gen_mangle_unused_area() PRODUCT_RETURN; jrose@1424: jrose@1424: // Call these in sequential code around the processing of strong roots. jrose@1424: class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope { jrose@1424: public: jrose@1424: ParStrongRootsScope(); jrose@1424: ~ParStrongRootsScope(); jrose@1424: }; duke@435: }; duke@435: duke@435: inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) duke@435: { duke@435: assert(is_power_of_2((intptr_t)val), "must be a power of 2"); jmasa@448: var = round_to(val, intra_heap_alignment()); duke@435: return var; duke@435: } stefank@2314: stefank@2314: #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP