Fri, 31 May 2013 14:32:44 +0200
8022880: False sharing between PSPromotionManager instances
Summary: Pad the PSPromotionManager instances in the manager array.
Reviewed-by: brutisso, jmasa
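
The fix summarized above is the classic padding remedy for false sharing: each PSPromotionManager slot in the manager array is padded out so that no two instances share a cache line. Below is a minimal sketch of the technique, assuming a hypothetical Manager type, an assumed 64-byte line size, and a plain padded wrapper; the actual HotSpot patch pads PSPromotionManager itself using the VM's own padding helpers.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for PSPromotionManager's hot, per-GC-thread state.
struct Manager {
  size_t _claimed_stack_top;
  size_t _dispatched_tasks;
};

static const size_t kCacheLineSize = 64; // assumed; real code queries the platform

// Pad each element up to a cache-line multiple so that adjacent entries in
// the manager array never land on the same line. Without this, two GC worker
// threads writing to neighboring Manager instances would invalidate each
// other's cache lines on every store (false sharing).
struct PaddedManager : public Manager {
  char _pad[kCacheLineSize - (sizeof(Manager) % kCacheLineSize)];
};

static PaddedManager* _manager_array = NULL; // one entry per GC worker thread

int main() {
  _manager_array = new PaddedManager[8]; // e.g., one manager per worker
  std::printf("sizeof(Manager) = %zu, sizeof(PaddedManager) = %zu\n",
              sizeof(Manager), sizeof(PaddedManager));
  delete[] _manager_array;
  return 0;
}

The padding trades a small amount of footprint per element for the elimination of cross-thread cache-line invalidations when GC workers update neighboring managers. The header below is one of the files touched by the changeset.
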
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class CollectorPolicy;
class GCHeapSummary;
class GCTaskManager;
class GenerationSizer;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen* _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager* _gc_task_manager; // The task manager.

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen() { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment() const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection. For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p); // reserved part
  bool is_in_old(oop p);   // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and has caused a NULL to be returned. If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // invoke_scavenge() collects the young generation only, unless the scavenge
  // fails, in which case it will attempt a full gc. do_full_collection()
  // collects the entire heap, clearing all soft references when
  // clear_all_soft_refs is true.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
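
A footnote on set_alignment() above: it asserts that its input is a power of two and then rounds it up to a multiple of intra_heap_alignment(), which is 64 * K * HeapWordSize, i.e. 512 KiB on an LP64 platform. A standalone sketch of that rounding, assuming round_to() is the usual round-up-to-a-multiple helper for power-of-two alignments:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-in for HotSpot's round_to(), assumed here to round 'value' up to the
// nearest multiple of 'alignment'. The bit trick requires 'alignment' to be
// a power of two.
static size_t round_to(size_t value, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "must be a power of 2");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t intra_heap_alignment = 64 * 1024 * 8; // 64 * K * HeapWordSize, LP64
  // A smaller value is rounded up to one full alignment unit...
  std::printf("%zu\n", round_to(4096, intra_heap_alignment));                 // 524288
  // ...while an already-aligned value passes through unchanged.
  std::printf("%zu\n", round_to(intra_heap_alignment, intra_heap_alignment)); // 524288
  return 0;
}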