Fri, 16 Mar 2012 16:14:04 +0100
7154517: Build error in hotspot-gc without precompiled headers
Reviewed-by: jcoomes, brutisso
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager* _gc_task_manager;  // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
  // GenerationSizer* collector_policy() const { return _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen() { return _old_gen; }
  static PSPermGen* perm_gen() { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t perm_gen_alignment() const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment() const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection. For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {  // reserved part
    return perm_gen()->reserved().contains(p);
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_permanent(const void *p) const {  // committed part
    return perm_gen()->is_in(p);
  }

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part
  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and that this caused NULL to be returned. If NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);
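  // Illustrative caller pattern (a sketch only; the names below are
  // hypothetical and not part of this interface):
  //   bool limit_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &limit_exceeded);
  //   if (obj == NULL && limit_exceeded) {
  //     // report "GC overhead limit exceeded" instead of retrying the allocation
  //   }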
  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc. The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);
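  // Illustrative use (a sketch; the surrounding VM operation is hypothetical):
  // a VM operation's doit(), running in the vm thread at a safepoint, could call
  //   heap->invoke_scavenge();   // young collection; attempts a full gc if the scavenge fails
  // or, when a maximally compacting collection that clears soft references is
  // required,
  //   heap->invoke_full_gc(true /* maximum_compaction */);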
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }
  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);
  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
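  // Illustrative use (a sketch; the root-processing step itself is not shown
  // here): sequential code that hands strong-root scanning off to parallel GC
  // tasks brackets that work with this scope, e.g.
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... enqueue and execute the root-scanning GC tasks ...
  //   }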
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
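// Worked example (illustrative): with intra_heap_alignment() returning 64 * K,
// set_alignment(_young_gen_alignment, 64 * K) stores and returns 64 * K
// unchanged, while a smaller power-of-2 value such as 8 * K would first be
// rounded up to 64 * K by round_to() before being stored and returned.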
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP