Sat, 23 Nov 2013 12:25:13 +0100
8028128: Add a type safe alternative for working with counter based data
Reviewed-by: dholmes, egahlin
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}
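
// The selected name shows up in GC log output (see the expand/shrink
// logging and print_used_change() below): "ParOldGen" when the parallel
// compacting old collector is in use, "PSOldGen" otherwise.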

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}
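
// This variant defers all real initialization: no virtual space or spaces
// are created here, and callers are expected to finish setup themselves
// (in this codebase the adaptive-boundary subclass, ASPSOldGen, appears
// to be the one taking this path).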
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}
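
// Full initialization: commit the initial part of the reserved space, set
// up the spaces and card table, then publish the performance counters.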
void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(_ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}
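
// The counters created here are published through the jvmstat (PerfData)
// interface; PSGenerationCounters wraps this generation's counter data in
// a type-safe way (see 8028128 in the changeset header above).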
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now precompact the young gen.
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}
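
// "Available" counts both the free part of the committed space and memory
// that is reserved but not yet committed, since expand() can commit the
// latter on demand.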
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported.
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}
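
// Same as above, but for callers that allocate with a CAS (e.g. multiple
// GC threads claiming old-gen space in parallel) rather than under the
// heap lock.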
HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}
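
// Worked example of the sizing logic below (hypothetical values: 64K space
// alignment, MinHeapDeltaBytes == 128K): expand(100) gives
// aligned_bytes == 64K and aligned_expand_bytes == 128K, so we first try
// to grow by 128K and only fall back to the smaller 64K request (and then
// to whatever remains of the reserved space) if that fails.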
void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen.  Expand
    // by at least providing a page per lgroup.  Alignment is larger or equal
    // to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
    // Use the aligned size here; passing the raw byte count would bypass
    // the alignment computed above.
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}
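
// Resize so that roughly desired_free_space bytes are free above the used
// part of the generation.  Worked example (hypothetical values: 64M
// committed, 48M used): resize(32M) computes new_size = 48M + 32M = 80M,
// clamps it to [min_gen_size(), gen_size_limit()], aligns it up, and then
// expands by the 16M difference from the current committed size.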
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                           " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
                           " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                           desired_free_space, used_in_bytes(), new_size, current_size,
                           gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does.  Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}
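
// The three methods below are only meaningful when the generation boundary
// can move; for a fixed-boundary PSOldGen they must never be called, hence
// ShouldNotReachHere().  The adaptive-boundary subclass (ASPSOldGen)
// presumably provides the real implementations.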
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" " SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "(" SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}
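
// Walks every object in the generation and checks it against the
// ObjectStartArray: probing an address one word past the object's start
// must walk back to that same object, and the object's start must be
// recorded as a block allocation.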
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif