Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/collectorCounters.hpp"
27 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/blockOffsetTable.inline.hpp"
30 #include "memory/generation.inline.hpp"
31 #include "memory/generationSpec.hpp"
32 #include "memory/space.hpp"
33 #include "memory/tenuredGeneration.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "runtime/java.hpp"
36 #include "utilities/macros.hpp"
// Construct the tenured (old) generation over the reserved space "rs".
// The base class sets up the card-table remembered set and virtual space;
// this constructor then creates the single TenuredSpace covering the
// committed region, the GC statistics object, the performance counters,
// and (when ParNew is built in and enabled) one promotion allocation
// buffer per parallel GC thread.
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  // The generation's single contiguous space spans the committed region.
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  // No shrinking damping until compute_new_size() establishes one.
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  // "MSC" = mark-sweep-compact, the collector used for this generation.
  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    // One promotion buffer (PLAB with block-offset-table updates) per
    // parallel GC worker thread, indexed by thread number.
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads, mtGC);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // INCLUDE_ALL_GCS
}
85 const char* TenuredGeneration::name() const {
86 return "tenured generation";
87 }
// Resize the generation after a collection.  Expands if free space fell
// below MinHeapFreeRatio; otherwise considers shrinking toward
// MaxHeapFreeRatio and taking back any expansion done for promotions.
// Shrinking is damped across successive calls via _shrink_factor
// (0% -> 10% -> 40% -> 100%) to avoid expand/shrink thrashing.
void TenuredGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset; re-established below only if we decide to shrink again.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  // Smallest capacity keeping usage at or below maximum_used_percentage,
  // clamped to max_uintx so the double->size_t conversion cannot overflow.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
      "  minimum_free_percentage: %6.2f"
      "  maximum_used_percentage: %6.2f",
      minimum_free_percentage,
      maximum_used_percentage);
    gclog_or_tty->print_cr("  "
      "   free_after_gc   : %6.1fK"
      "   used_after_gc   : %6.1fK"
      "   capacity_after_gc   : %6.1fK",
      free_after_gc / (double) K,
      used_after_gc / (double) K,
      capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
      "   free_percentage: %6.2f",
      free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
        "  minimum_desired_capacity: %6.1fK"
        "  expand_bytes: %6.1fK"
        "  _min_heap_delta_bytes: %6.1fK",
        minimum_desired_capacity / (double) K,
        expand_bytes / (double) K,
        _min_heap_delta_bytes / (double) K);
    }
    // Expanded (or wanted to); shrinking is not considered this cycle.
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    // Largest capacity keeping usage at or above minimum_used_percentage,
    // clamped to max_uintx as above.
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
        "  maximum_free_percentage: %6.2f"
        "  minimum_used_percentage: %6.2f",
        maximum_free_percentage,
        minimum_used_percentage);
      gclog_or_tty->print_cr("  "
        "  _capacity_at_prologue: %6.1fK"
        "  minimum_desired_capacity: %6.1fK"
        "  maximum_desired_capacity: %6.1fK",
        _capacity_at_prologue / (double) K,
        minimum_desired_capacity / (double) K,
        maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
          "  shrinking:"
          "  initSize: %.1fK"
          "  maximum_desired_capacity: %.1fK",
          spec()->init_size() / (double) K,
          maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
          "  shrink_bytes: %.1fK"
          "  current_shrink_factor: %d"
          "  new shrink factor: %d"
          "  _min_heap_delta_bytes: %.1fK",
          shrink_bytes / (double) K,
          current_shrink_factor,
          _shrink_factor,
          _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
        "  aggressive shrinking:"
        "  _capacity_at_prologue: %.1fK"
        "  capacity_after_gc: %.1fK"
        "  expansion_for_promotion: %.1fK"
        "  shrink_bytes: %.1fK",
        capacity_after_gc / (double) K,
        _capacity_at_prologue / (double) K,
        expansion_for_promotion / (double) K,
        shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
  assert(used() == used_after_gc && used_after_gc <= capacity(),
         "sanity check");
}
245 void TenuredGeneration::gc_prologue(bool full) {
246 _capacity_at_prologue = capacity();
247 _used_at_prologue = used();
248 if (VerifyBeforeGC) {
249 verify_alloc_buffers_clean();
250 }
251 }
253 void TenuredGeneration::gc_epilogue(bool full) {
254 if (VerifyAfterGC) {
255 verify_alloc_buffers_clean();
256 }
257 OneContigSpaceCardGeneration::gc_epilogue(full);
258 }
261 bool TenuredGeneration::should_collect(bool full,
262 size_t size,
263 bool is_tlab) {
264 // This should be one big conditional or (||), but I want to be able to tell
265 // why it returns what it returns (without re-evaluating the conditionals
266 // in case they aren't idempotent), so I'm doing it this way.
267 // DeMorgan says it's okay.
268 bool result = false;
269 if (!result && full) {
270 result = true;
271 if (PrintGC && Verbose) {
272 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
273 " full");
274 }
275 }
276 if (!result && should_allocate(size, is_tlab)) {
277 result = true;
278 if (PrintGC && Verbose) {
279 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
280 " should_allocate(" SIZE_FORMAT ")",
281 size);
282 }
283 }
284 // If we don't have very much free space.
285 // XXX: 10000 should be a percentage of the capacity!!!
286 if (!result && free() < 10000) {
287 result = true;
288 if (PrintGC && Verbose) {
289 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
290 " free(): " SIZE_FORMAT,
291 free());
292 }
293 }
294 // If we had to expand to accomodate promotions from younger generations
295 if (!result && _capacity_at_prologue < capacity()) {
296 result = true;
297 if (PrintGC && Verbose) {
298 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
299 "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
300 _capacity_at_prologue, capacity());
301 }
302 }
303 return result;
304 }
306 void TenuredGeneration::collect(bool full,
307 bool clear_all_soft_refs,
308 size_t size,
309 bool is_tlab) {
310 retire_alloc_buffers_before_full_gc();
311 OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
312 size, is_tlab);
313 }
315 void TenuredGeneration::update_gc_stats(int current_level,
316 bool full) {
317 // If the next lower level(s) has been collected, gather any statistics
318 // that are of interest at this point.
319 if (!full && (current_level + 1) == level()) {
320 // Calculate size of data promoted from the younger generations
321 // before doing the collection.
322 size_t used_before_gc = used();
324 // If the younger gen collections were skipped, then the
325 // number of promoted bytes will be 0 and adding it to the
326 // average will incorrectly lessen the average. It is, however,
327 // also possible that no promotion was needed.
328 if (used_before_gc >= _used_at_prologue) {
329 size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
330 gc_stats()->avg_promoted()->sample(promoted_in_bytes);
331 }
332 }
333 }
335 void TenuredGeneration::update_counters() {
336 if (UsePerfData) {
337 _space_counters->update_all();
338 _gen_counters->update_all();
339 }
340 }
#if INCLUDE_ALL_GCS
// Parallel promotion support (ParNew): each GC worker thread promotes
// into its own per-thread allocation buffer (_alloc_buffers[thread_num]).

// Copy "old" (of size word_sz words, with saved mark word m) into this
// generation on behalf of worker thread_num.  Returns the new copy, or
// NULL if promotion failed (out of space, or induced failure in debug).
oop TenuredGeneration::par_promote(int thread_num,
                                   oop old, markOop m, size_t word_sz) {

  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  HeapWord* obj_ptr = buf->allocate(word_sz);
  // NOTE(review): is_lab appears unused in this method.
  bool is_lab = true;
  if (obj_ptr == NULL) {
#ifndef PRODUCT
    // Debug-only hook to exercise promotion-failure paths.
    if (Universe::heap()->promotion_should_fail()) {
      return NULL;
    }
#endif  // #ifndef PRODUCT

    // Slow path:
    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      size_t buf_size = buf->word_sz();
      HeapWord* buf_space =
        TenuredGeneration::par_allocate(buf_size, false);
      if (buf_space == NULL) {
        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
      }
      if (buf_space != NULL) {
        buf->retire(false, false);
        buf->set_buf(buf_space);
        obj_ptr = buf->allocate(word_sz);
        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
      }
    };
    // Otherwise, buffer allocation failed; try allocating object
    // individually.
    if (obj_ptr == NULL) {
      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
      if (obj_ptr == NULL) {
        // Last resort: expand the generation and retry.
        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
      }
    }
    if (obj_ptr == NULL) return NULL;
  }
  assert(obj_ptr != NULL, "program logic");
  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
  oop obj = oop(obj_ptr);
  // Restore the mark word copied above.
  obj->set_mark(m);
  return obj;
}

// Undo a promotion allocation of word_sz words at obj for thread_num:
// return the space to the thread's buffer if it came from there,
// otherwise plug the hole with a filler object so the space stays
// parseable.
void TenuredGeneration::par_promote_alloc_undo(int thread_num,
                                               HeapWord* obj,
                                               size_t word_sz) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  if (buf->contains(obj)) {
    guarantee(buf->contains(obj + word_sz - 1),
              "should contain whole object");
    buf->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

// Called when worker thread_num finishes promoting: retire its buffer,
// optionally retaining it for the next GC per ParallelGCRetainPLAB.
void TenuredGeneration::par_promote_alloc_done(int thread_num) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  buf->retire(true, ParallelGCRetainPLAB);
}

// Retire (without retaining) every thread's buffer before a full GC so
// no partially-filled buffer survives the compaction.
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
    }
  }
}

// Verify that any retained parallel allocation buffers do not
// intersect with dirty cards.
void TenuredGeneration::verify_alloc_buffers_clean() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
    }
  }
}

#else  // INCLUDE_ALL_GCS
// Without the parallel collectors built in there are no per-thread
// promotion buffers, so these are no-ops.
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}
#endif // INCLUDE_ALL_GCS
432 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
433 size_t available = max_contiguous_available();
434 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
435 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
436 if (PrintGC && Verbose) {
437 gclog_or_tty->print_cr(
438 "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
439 "max_promo("SIZE_FORMAT")",
440 res? "":" not", available, res? ">=":"<",
441 av_promo, max_promotion_in_bytes);
442 }
443 return res;
444 }