Fri, 01 Nov 2013 17:09:38 +0100
8016309: assert(eden_size > 0 && survivor_size > 0) failed: just checking
7057939: jmap shows MaxNewSize=4GB when Java is using parallel collector
Summary: Major cleanup of the collectorpolicy classes
Reviewed-by: tschatzl, jcoomes
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
26 #define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
28 #include "memory/freeList.hpp"
29 #include "gc_implementation/shared/allocationStats.hpp"
31 class CompactibleFreeListSpace;
33 // A class for maintaining a free list of Chunk's. The FreeList
34 // maintains the structure of the list (head, tail, etc.) plus
35 // statistics for allocations from the list. The links between items
36 // are not part of FreeList. The statistics are
37 // used to make decisions about coalescing Chunk's when they
38 // are swept during collection.
39 //
40 // See the corresponding .cpp file for a description of the specifics
41 // for that implementation.
43 class Mutex;
45 template <class Chunk>
46 class AdaptiveFreeList : public FreeList<Chunk> {
47 friend class CompactibleFreeListSpace;
48 friend class VMStructs;
49 // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
51 size_t _hint; // next larger size list with a positive surplus
53 AllocationStats _allocation_stats; // allocation-related statistics
55 public:
// Default constructor; the definition lives in the corresponding .cpp file.
57 AdaptiveFreeList();
// Re-export selected inherited FreeList<Chunk> operations so they remain
// publicly accessible on this derived type.
59 using FreeList<Chunk>::assert_proper_lock_protection;
60 #ifdef ASSERT
61 using FreeList<Chunk>::protecting_lock;
62 #endif
63 using FreeList<Chunk>::count;
64 using FreeList<Chunk>::size;
65 using FreeList<Chunk>::verify_chunk_in_free_list;
66 using FreeList<Chunk>::getFirstNChunksFromList;
67 using FreeList<Chunk>::print_on;
// Return a chunk to the head/tail of the list. The record_return flag
// presumably controls whether the return is reflected in the allocation
// statistics -- see the .cpp file to confirm.
68 void return_chunk_at_head(Chunk* fc, bool record_return);
69 void return_chunk_at_head(Chunk* fc);
70 void return_chunk_at_tail(Chunk* fc, bool record_return);
71 void return_chunk_at_tail(Chunk* fc);
72 using FreeList<Chunk>::return_chunk_at_tail;
73 using FreeList<Chunk>::remove_chunk;
74 using FreeList<Chunk>::prepend;
75 using FreeList<Chunk>::print_labels_on;
76 using FreeList<Chunk>::get_chunk_at_head;
78 // Initialize.
79 void initialize();
81 // Reset the head, tail, hint, and count of a free list.
82 void reset(size_t hint);
// Lock-protection check helper; PRODUCT_RETURN makes this a no-op in
// product (non-debug) builds.
84 void assert_proper_lock_protection_work() const PRODUCT_RETURN;
86 void print_on(outputStream* st, const char* c = NULL) const;
// Accessor for _hint: the next larger size list with a positive surplus.
88 size_t hint() const {
89 return _hint;
90 }
// A valid hint is either 0 (no hint) or strictly larger than this
// list's own chunk size, as enforced by the assert below.
91 void set_hint(size_t v) {
92 assert_proper_lock_protection();
93 assert(v == 0 || size() < v, "Bad hint");
94 _hint = v;
95 }
// Choose a "better" size to allocate from; policy is in the .cpp file.
97 size_t get_better_size();
99 // Accessors for statistics
100 void init_statistics(bool split_birth = false);
102 AllocationStats* allocation_stats() {
103 assert_proper_lock_protection();
104 return &_allocation_stats;
105 }
// The accessors below all delegate to _allocation_stats; every mutator
// first asserts that the proper lock is held.
107 ssize_t desired() const {
108 return _allocation_stats.desired();
109 }
110 void set_desired(ssize_t v) {
111 assert_proper_lock_protection();
112 _allocation_stats.set_desired(v);
113 }
// Recompute the desired count from the current list length and the
// supplied sweep-interval estimates (delegated to AllocationStats).
114 void compute_desired(float inter_sweep_current,
115 float inter_sweep_estimate,
116 float intra_sweep_estimate) {
117 assert_proper_lock_protection();
118 _allocation_stats.compute_desired(count(),
119 inter_sweep_current,
120 inter_sweep_estimate,
121 intra_sweep_estimate);
122 }
123 ssize_t coal_desired() const {
124 return _allocation_stats.coal_desired();
125 }
126 void set_coal_desired(ssize_t v) {
127 assert_proper_lock_protection();
128 _allocation_stats.set_coal_desired(v);
129 }
131 ssize_t surplus() const {
132 return _allocation_stats.surplus();
133 }
134 void set_surplus(ssize_t v) {
135 assert_proper_lock_protection();
136 _allocation_stats.set_surplus(v);
137 }
138 void increment_surplus() {
139 assert_proper_lock_protection();
140 _allocation_stats.increment_surplus();
141 }
142 void decrement_surplus() {
143 assert_proper_lock_protection();
144 _allocation_stats.decrement_surplus();
145 }
// "bfr_surp" -- presumably the surplus recorded before the sweep; see
// AllocationStats for the authoritative definition.
147 ssize_t bfr_surp() const {
148 return _allocation_stats.bfr_surp();
149 }
150 void set_bfr_surp(ssize_t v) {
151 assert_proper_lock_protection();
152 _allocation_stats.set_bfr_surp(v);
153 }
154 ssize_t prev_sweep() const {
155 return _allocation_stats.prev_sweep();
156 }
157 void set_prev_sweep(ssize_t v) {
158 assert_proper_lock_protection();
159 _allocation_stats.set_prev_sweep(v);
160 }
161 ssize_t before_sweep() const {
162 return _allocation_stats.before_sweep();
163 }
164 void set_before_sweep(ssize_t v) {
165 assert_proper_lock_protection();
166 _allocation_stats.set_before_sweep(v);
167 }
// Coalesce/split birth and death counters. Per the file-header comment,
// these statistics feed the sweeper's chunk-coalescing decisions.
169 ssize_t coal_births() const {
170 return _allocation_stats.coal_births();
171 }
172 void set_coal_births(ssize_t v) {
173 assert_proper_lock_protection();
174 _allocation_stats.set_coal_births(v);
175 }
176 void increment_coal_births() {
177 assert_proper_lock_protection();
178 _allocation_stats.increment_coal_births();
179 }
181 ssize_t coal_deaths() const {
182 return _allocation_stats.coal_deaths();
183 }
184 void set_coal_deaths(ssize_t v) {
185 assert_proper_lock_protection();
186 _allocation_stats.set_coal_deaths(v);
187 }
188 void increment_coal_deaths() {
189 assert_proper_lock_protection();
190 _allocation_stats.increment_coal_deaths();
191 }
193 ssize_t split_births() const {
194 return _allocation_stats.split_births();
195 }
196 void set_split_births(ssize_t v) {
197 assert_proper_lock_protection();
198 _allocation_stats.set_split_births(v);
199 }
200 void increment_split_births() {
201 assert_proper_lock_protection();
202 _allocation_stats.increment_split_births();
203 }
205 ssize_t split_deaths() const {
206 return _allocation_stats.split_deaths();
207 }
208 void set_split_deaths(ssize_t v) {
209 assert_proper_lock_protection();
210 _allocation_stats.set_split_deaths(v);
211 }
212 void increment_split_deaths() {
213 assert_proper_lock_protection();
214 _allocation_stats.increment_split_deaths();
215 }
217 #ifndef PRODUCT
218 // For debugging. The "_returned_bytes" in all the lists are summed
219 // and compared with the total number of bytes swept during a
220 // collection.
221 size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
222 void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
223 void increment_returned_bytes_by(size_t v) {
224 _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
225 }
226 // Stats verification
227 void verify_stats() const;
228 #endif // NOT PRODUCT
229 };
231 #endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP