Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
27 #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
28 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
29 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
// If boundary moving is being used, create the young gen and old
// gen with ASPSYoungGen and ASPSOldGen, respectively.  Revert to
// the old behavior otherwise (with PSYoungGen and PSOldGen).
//
// old_young_rs         - single reserved space covering both generations
// init/min/max_low_*   - initial/minimum/maximum byte sizes of the old
//                        (low-address) generation
// init/min/max_high_*  - initial/minimum/maximum byte sizes of the young
//                        (high-address) generation
// alignment            - alignment used when carving up old_young_rs
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
                                           size_t init_low_byte_size,
                                           size_t min_low_byte_size,
                                           size_t max_low_byte_size,
                                           size_t init_high_byte_size,
                                           size_t min_high_byte_size,
                                           size_t max_high_byte_size,
                                           size_t alignment) :
  _virtual_spaces(old_young_rs, min_low_byte_size,
                  min_high_byte_size, alignment) {
  // min <= init <= max must hold for each generation.
  assert(min_low_byte_size <= init_low_byte_size &&
         init_low_byte_size <= max_low_byte_size, "Parameter check");
  assert(min_high_byte_size <= init_high_byte_size &&
         init_high_byte_size <= max_high_byte_size, "Parameter check");
  // Create the generations differently based on the option to
  // move the boundary.
  if (UseAdaptiveGCBoundary) {
    // Initialize the adjoining virtual spaces.  Then pass a
    // virtual space to each generation for initialization of the
    // generation.

    // Does the actual creation of the virtual spaces.  This must happen
    // before the generations are created since they are handed the
    // already-initialized virtual spaces below.
    _virtual_spaces.initialize(max_low_byte_size,
                               init_low_byte_size,
                               init_high_byte_size);

    // Place the young gen at the high end.  Passes in the virtual space.
    _young_gen = new ASPSYoungGen(_virtual_spaces.high(),
                                  _virtual_spaces.high()->committed_size(),
                                  min_high_byte_size,
                                  _virtual_spaces.high_byte_size_limit());

    // Place the old gen at the low end.  Passes in the virtual space.
    _old_gen = new ASPSOldGen(_virtual_spaces.low(),
                              _virtual_spaces.low()->committed_size(),
                              min_low_byte_size,
                              _virtual_spaces.low_byte_size_limit(),
                              "old", 1);

    // Finish initialization now that both generations exist; each
    // generation must fit within the overall reservation.
    young_gen()->initialize_work();
    assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= young_gen()->gen_size_limit(),
           "Consistency check");

    old_gen()->initialize_work("old", 1);
    assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
           "Consistency check");
    assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
           "Consistency check");
  } else {

    // Layout the reserved space for the generations: old gen in the low
    // part, young gen in the remainder (the boundary is fixed).
    ReservedSpace old_rs =
      virtual_spaces()->reserved_space().first_part(max_low_byte_size);
    ReservedSpace heap_rs =
      virtual_spaces()->reserved_space().last_part(max_low_byte_size);
    ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
    // The young gen must consume all of the space left after the old gen.
    assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");

    // Create the generations.  Virtual spaces are not passed in.
    _young_gen = new PSYoungGen(init_high_byte_size,
                                min_high_byte_size,
                                max_high_byte_size);
    _old_gen = new PSOldGen(init_low_byte_size,
                            min_low_byte_size,
                            max_low_byte_size,
                            "old", 1);

    // The virtual spaces are created by the initialization of the gens.
    _young_gen->initialize(young_rs, alignment);
    assert(young_gen()->gen_size_limit() == young_rs.size(),
           "Consistency check");
    _old_gen->initialize(old_rs, alignment, "old", 1);
    assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
  }
}
113 size_t AdjoiningGenerations::reserved_byte_size() {
114 return virtual_spaces()->reserved_space().size();
115 }
118 // Make checks on the current sizes of the generations and
119 // the contraints on the sizes of the generations. Push
120 // up the boundary within the contraints. A partial
121 // push can occur.
122 void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
123 assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
125 assert_lock_strong(ExpandHeap_lock);
126 assert_locked_or_safepoint(Heap_lock);
128 // These sizes limit the amount the boundaries can move. Effectively,
129 // the generation says how much it is willing to yield to the other
130 // generation.
131 const size_t young_gen_available = young_gen()->available_for_contraction();
132 const size_t old_gen_available = old_gen()->available_for_expansion();
133 const size_t alignment = virtual_spaces()->alignment();
134 size_t change_in_bytes = MIN3(young_gen_available,
135 old_gen_available,
136 align_size_up_(expand_in_bytes, alignment));
138 if (change_in_bytes == 0) {
139 return;
140 }
142 if (TraceAdaptiveGCBoundary) {
143 gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
144 gclog_or_tty->print_cr(" Requested change: 0x%x Attempted change: 0x%x",
145 expand_in_bytes, change_in_bytes);
146 if (!PrintHeapAtGC) {
147 Universe::print_on(gclog_or_tty);
148 }
149 gclog_or_tty->print_cr(" PSOldGen max size: " SIZE_FORMAT "K",
150 old_gen()->max_gen_size()/K);
151 }
153 // Move the boundary between the generations up (smaller young gen).
154 if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
155 young_gen()->reset_after_change();
156 old_gen()->reset_after_change();
157 }
159 // The total reserved for the generations should match the sum
160 // of the two even if the boundary is moving.
161 assert(reserved_byte_size() ==
162 old_gen()->max_gen_size() + young_gen()->max_size(),
163 "Space is missing");
164 young_gen()->space_invariants();
165 old_gen()->space_invariants();
167 if (TraceAdaptiveGCBoundary) {
168 gclog_or_tty->print_cr("After expansion of old gen with boundary move");
169 if (!PrintHeapAtGC) {
170 Universe::print_on(gclog_or_tty);
171 }
172 gclog_or_tty->print_cr(" PSOldGen max size: " SIZE_FORMAT "K",
173 old_gen()->max_gen_size()/K);
174 }
175 }
177 // See comments on request_old_gen_expansion()
178 bool AdjoiningGenerations::request_young_gen_expansion(size_t expand_in_bytes) {
179 assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
181 // If eden is not empty, the boundary can be moved but no advantage
182 // can be made of the move since eden cannot be moved.
183 if (!young_gen()->eden_space()->is_empty()) {
184 return false;
185 }
188 bool result = false;
189 const size_t young_gen_available = young_gen()->available_for_expansion();
190 const size_t old_gen_available = old_gen()->available_for_contraction();
191 const size_t alignment = virtual_spaces()->alignment();
192 size_t change_in_bytes = MIN3(young_gen_available,
193 old_gen_available,
194 align_size_up_(expand_in_bytes, alignment));
196 if (change_in_bytes == 0) {
197 return false;
198 }
200 if (TraceAdaptiveGCBoundary) {
201 gclog_or_tty->print_cr("Before expansion of young gen with boundary move");
202 gclog_or_tty->print_cr(" Requested change: 0x%x Attempted change: 0x%x",
203 expand_in_bytes, change_in_bytes);
204 if (!PrintHeapAtGC) {
205 Universe::print_on(gclog_or_tty);
206 }
207 gclog_or_tty->print_cr(" PSYoungGen max size: " SIZE_FORMAT "K",
208 young_gen()->max_size()/K);
209 }
211 // Move the boundary between the generations down (smaller old gen).
212 MutexLocker x(ExpandHeap_lock);
213 if (virtual_spaces()->adjust_boundary_down(change_in_bytes)) {
214 young_gen()->reset_after_change();
215 old_gen()->reset_after_change();
216 result = true;
217 }
219 // The total reserved for the generations should match the sum
220 // of the two even if the boundary is moving.
221 assert(reserved_byte_size() ==
222 old_gen()->max_gen_size() + young_gen()->max_size(),
223 "Space is missing");
224 young_gen()->space_invariants();
225 old_gen()->space_invariants();
227 if (TraceAdaptiveGCBoundary) {
228 gclog_or_tty->print_cr("After expansion of young gen with boundary move");
229 if (!PrintHeapAtGC) {
230 Universe::print_on(gclog_or_tty);
231 }
232 gclog_or_tty->print_cr(" PSYoungGen max size: " SIZE_FORMAT "K",
233 young_gen()->max_size()/K);
234 }
236 return result;
237 }
239 // Additional space is needed in the old generation. Try to move the boundary
240 // up to meet the need. Moves boundary up only
241 void AdjoiningGenerations::adjust_boundary_for_old_gen_needs(
242 size_t desired_free_space) {
243 assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
245 // Stress testing.
246 if (PSAdaptiveSizePolicyResizeVirtualSpaceAlot == 1) {
247 MutexLocker x(ExpandHeap_lock);
248 request_old_gen_expansion(virtual_spaces()->alignment() * 3 / 2);
249 }
251 // Expand only if the entire generation is already committed.
252 if (old_gen()->virtual_space()->uncommitted_size() == 0) {
253 if (old_gen()->free_in_bytes() < desired_free_space) {
254 MutexLocker x(ExpandHeap_lock);
255 request_old_gen_expansion(desired_free_space);
256 }
257 }
258 }
260 // See comment on adjust_boundary_for_old_gen_needss().
261 // Adjust boundary down only.
262 void AdjoiningGenerations::adjust_boundary_for_young_gen_needs(size_t eden_size,
263 size_t survivor_size) {
265 assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
267 // Stress testing.
268 if (PSAdaptiveSizePolicyResizeVirtualSpaceAlot == 0) {
269 request_young_gen_expansion(virtual_spaces()->alignment() * 3 / 2);
270 eden_size = young_gen()->eden_space()->capacity_in_bytes();
271 }
273 // Expand only if the entire generation is already committed.
274 if (young_gen()->virtual_space()->uncommitted_size() == 0) {
275 size_t desired_size = eden_size + 2 * survivor_size;
276 const size_t committed = young_gen()->virtual_space()->committed_size();
277 if (desired_size > committed) {
278 request_young_gen_expansion(desired_size - committed);
279 }
280 }
281 }