Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one used by concurrent marking and the other by STW GCs (both full collections and incremental evacuation pauses). During an evacuation pause, the reference processor is embedded in the closures used to scan objects, so that reference objects are 'discovered' by the reference processor as they are scanned. At the end of the evacuation pause, the discovered reference objects are processed, preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
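
To make the summary concrete, here is a minimal standalone sketch of the pattern it describes (discovery from within the scan closures, processing at the end of the pause). All names here - Ref, RefProcessor, EvacuateClosure - are illustrative stand-ins, not the actual HotSpot types touched by this changeset.

#include <cstdio>
#include <vector>

struct Ref { int referent; bool discovered; };  // stand-in for a reference object

// Stand-in for a reference processor.  G1 keeps two instances: one used by
// concurrent marking and one used by STW collections.
class RefProcessor {
  std::vector<Ref*> _discovered;
 public:
  // Called from the object-scanning closures: scanning a reference object
  // also 'discovers' it.
  void discover(Ref* r) {
    if (!r->discovered) {
      r->discovered = true;
      _discovered.push_back(r);
    }
  }
  // Called at the end of the evacuation pause: preserve (and copy) referents
  // and their reachable graphs as appropriate.
  void process() {
    for (Ref* r : _discovered) {
      std::printf("processing reference, referent=%d\n", r->referent);
      r->discovered = false;
    }
    _discovered.clear();
  }
};

// The reference processor is embedded in the closure used to scan objects.
class EvacuateClosure {
  RefProcessor* _rp;
 public:
  explicit EvacuateClosure(RefProcessor* rp) : _rp(rp) {}
  void do_ref(Ref* r) {
    _rp->discover(r);  // discovery happens as a side effect of scanning
    // ... copy the object to its destination region ...
  }
};

int main() {
  RefProcessor stw_rp;          // the STW processor; a second instance
  EvacuateClosure cl(&stw_rp);  // would serve concurrent marking
  Ref a = {1, false};
  Ref b = {2, false};
  cl.do_ref(&a);                // discovery during the evacuation pause
  cl.do_ref(&b);
  stw_rp.process();             // processing at the end of the pause
  return 0;
}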

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);
  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
}
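
// Commit the initial generation size out of the reservation up front; failure
// to commit it is fatal during VM startup.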
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation are the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);

  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now precompact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}
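
// "Contiguous available" counts both the free space in the committed object
// space and the still-uncommitted tail of the reservation, since the
// generation can expand into the latter.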
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
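  // GCExpandToAllocateDelayMillis is a test/diagnostic knob: when non-zero,
  // pause between expanding the generation and retrying the allocation.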
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen.  Expand
    // by at least one page per lgroup.  The alignment is larger than or
    // equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }
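
  // Attempt the expansion in decreasing order of size: the policy minimum
  // when it exceeds the request, then the requested amount, and finally
  // whatever remains of the reservation.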
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Align down so we never uncommit more than was asked for.
  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink() expects ExpandHeap_lock to be held by the caller;
    // expand() takes it itself.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}
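
// The following three methods are only meaningful when the boundary between
// the generations can move (UseAdaptiveGCBoundary); ASPSOldGen overrides
// them, so the base versions should never be reached.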
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" " SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "(" SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify(bool allow_dirty) {
  object_space()->verify(allow_dirty);
}
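
// Walks every object in the space and checks it against the start array:
// probing an interior address must return the object's start, and the
// object's block must be marked as allocated.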
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif