Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
28 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
29 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
30 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
31 #include "gc_implementation/shared/liveRange.hpp"
32 #include "gc_implementation/shared/markSweep.inline.hpp"
33 #include "gc_implementation/shared/spaceDecorator.hpp"
34 #include "oops/oop.inline.hpp"
// The decorator for the space currently being compacted into.  A single
// class-static cursor shared by all spaces: precompact() forwards objects
// into it and advance_destination_decorator() moves it through the
// compaction order (old gen, then eden/from/to of the young gen).
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
39 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
40 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
41 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
43 _destination_decorator = heap->old_gen()->object_mark_sweep();
44 }
46 void PSMarkSweepDecorator::set_destination_decorator_perm_gen() {
47 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
48 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
50 _destination_decorator = heap->perm_gen()->object_mark_sweep();
51 }
53 void PSMarkSweepDecorator::advance_destination_decorator() {
54 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
55 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
57 assert(_destination_decorator != NULL, "Sanity");
58 guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator");
60 PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
61 PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
62 PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
63 PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
65 if ( _destination_decorator == first ) {
66 _destination_decorator = second;
67 } else if ( _destination_decorator == second ) {
68 _destination_decorator = third;
69 } else if ( _destination_decorator == third ) {
70 _destination_decorator = fourth;
71 } else {
72 fatal("PSMarkSweep attempting to advance past last compaction area");
73 }
74 }
76 PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
77 assert(_destination_decorator != NULL, "Sanity");
79 return _destination_decorator;
80 }
82 // FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
83 // The object forwarding code is duplicated. Factor this out!!!!!
84 //
85 // This method "precompacts" objects inside its space to dest. It places forwarding
86 // pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
87 // finish by compacting into our own space.
89 void PSMarkSweepDecorator::precompact() {
90 // Reset our own compact top.
91 set_compaction_top(space()->bottom());
93 /* We allow some amount of garbage towards the bottom of the space, so
94 * we don't start compacting before there is a significant gain to be made.
95 * Occasionally, we want to ensure a full compaction, which is determined
96 * by the MarkSweepAlwaysCompactCount parameter. This is a significant
97 * performance improvement!
98 */
99 bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
101 size_t allowed_deadspace = 0;
102 if (skip_dead) {
103 const size_t ratio = allowed_dead_ratio();
104 allowed_deadspace = space()->capacity_in_words() * ratio / 100;
105 }
107 // Fetch the current destination decorator
108 PSMarkSweepDecorator* dest = destination_decorator();
109 ObjectStartArray* start_array = dest->start_array();
111 HeapWord* compact_top = dest->compaction_top();
112 HeapWord* compact_end = dest->space()->end();
114 HeapWord* q = space()->bottom();
115 HeapWord* t = space()->top();
117 HeapWord* end_of_live= q; /* One byte beyond the last byte of the last
118 live object. */
119 HeapWord* first_dead = space()->end(); /* The first dead object. */
120 LiveRange* liveRange = NULL; /* The current live range, recorded in the
121 first header of preceding free area. */
122 _first_dead = first_dead;
124 const intx interval = PrefetchScanIntervalInBytes;
126 while (q < t) {
127 assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
128 oop(q)->mark()->has_bias_pattern(),
129 "these are the only valid states during a mark sweep");
130 if (oop(q)->is_gc_marked()) {
131 /* prefetch beyond q */
132 Prefetch::write(q, interval);
133 size_t size = oop(q)->size();
135 size_t compaction_max_size = pointer_delta(compact_end, compact_top);
137 // This should only happen if a space in the young gen overflows the
138 // old gen. If that should happen, we null out the start_array, because
139 // the young spaces are not covered by one.
140 while(size > compaction_max_size) {
141 // First record the last compact_top
142 dest->set_compaction_top(compact_top);
144 // Advance to the next compaction decorator
145 advance_destination_decorator();
146 dest = destination_decorator();
148 // Update compaction info
149 start_array = dest->start_array();
150 compact_top = dest->compaction_top();
151 compact_end = dest->space()->end();
152 assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
153 assert(compact_end > compact_top, "Must always be space remaining");
154 compaction_max_size =
155 pointer_delta(compact_end, compact_top);
156 }
158 // store the forwarding pointer into the mark word
159 if (q != compact_top) {
160 oop(q)->forward_to(oop(compact_top));
161 assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
162 } else {
163 // if the object isn't moving we can just set the mark to the default
164 // mark and handle it specially later on.
165 oop(q)->init_mark();
166 assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
167 }
169 // Update object start array
170 if (start_array) {
171 start_array->allocate_block(compact_top);
172 }
174 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
175 compact_top += size;
176 assert(compact_top <= dest->space()->end(),
177 "Exceeding space in destination");
179 q += size;
180 end_of_live = q;
181 } else {
182 /* run over all the contiguous dead objects */
183 HeapWord* end = q;
184 do {
185 /* prefetch beyond end */
186 Prefetch::write(end, interval);
187 end += oop(end)->size();
188 } while (end < t && (!oop(end)->is_gc_marked()));
190 /* see if we might want to pretend this object is alive so that
191 * we don't have to compact quite as often.
192 */
193 if (allowed_deadspace > 0 && q == compact_top) {
194 size_t sz = pointer_delta(end, q);
195 if (insert_deadspace(allowed_deadspace, q, sz)) {
196 size_t compaction_max_size = pointer_delta(compact_end, compact_top);
198 // This should only happen if a space in the young gen overflows the
199 // old gen. If that should happen, we null out the start_array, because
200 // the young spaces are not covered by one.
201 while (sz > compaction_max_size) {
202 // First record the last compact_top
203 dest->set_compaction_top(compact_top);
205 // Advance to the next compaction decorator
206 advance_destination_decorator();
207 dest = destination_decorator();
209 // Update compaction info
210 start_array = dest->start_array();
211 compact_top = dest->compaction_top();
212 compact_end = dest->space()->end();
213 assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
214 assert(compact_end > compact_top, "Must always be space remaining");
215 compaction_max_size =
216 pointer_delta(compact_end, compact_top);
217 }
219 // store the forwarding pointer into the mark word
220 if (q != compact_top) {
221 oop(q)->forward_to(oop(compact_top));
222 assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
223 } else {
224 // if the object isn't moving we can just set the mark to the default
225 // mark and handle it specially later on.
226 oop(q)->init_mark();
227 assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
228 }
230 // Update object start array
231 if (start_array) {
232 start_array->allocate_block(compact_top);
233 }
235 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
236 compact_top += sz;
237 assert(compact_top <= dest->space()->end(),
238 "Exceeding space in destination");
240 q = end;
241 end_of_live = end;
242 continue;
243 }
244 }
246 /* for the previous LiveRange, record the end of the live objects. */
247 if (liveRange) {
248 liveRange->set_end(q);
249 }
251 /* record the current LiveRange object.
252 * liveRange->start() is overlaid on the mark word.
253 */
254 liveRange = (LiveRange*)q;
255 liveRange->set_start(end);
256 liveRange->set_end(end);
258 /* see if this is the first dead region. */
259 if (q < first_dead) {
260 first_dead = q;
261 }
263 /* move on to the next object */
264 q = end;
265 }
266 }
268 assert(q == t, "just checking");
269 if (liveRange != NULL) {
270 liveRange->set_end(q);
271 }
272 _end_of_live = end_of_live;
273 if (end_of_live < first_dead) {
274 first_dead = end_of_live;
275 }
276 _first_dead = first_dead;
278 // Update compaction top
279 dest->set_compaction_top(compact_top);
280 }
282 bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
283 HeapWord* q, size_t deadlength) {
284 if (allowed_deadspace_words >= deadlength) {
285 allowed_deadspace_words -= deadlength;
286 CollectedHeap::fill_with_object(q, deadlength);
287 oop(q)->set_mark(oop(q)->mark()->set_marked());
288 assert((int) deadlength == oop(q)->size(), "bad filler object size");
289 // Recall that we required "q == compaction_top".
290 return true;
291 } else {
292 allowed_deadspace_words = 0;
293 return false;
294 }
295 }
// Phase 3 of mark-sweep for this space: walk the live objects (as bounded by
// _end_of_live / _first_dead computed in precompact()) and rewrite each
// interior oop to its referent's forwarded location.
void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = _first_dead;

    // Walk the non-moving prefix object-by-object using object sizes,
    // since the marks were cleared by init_mark() in precompact().
    while (q < end) {
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky.  Using this to read the previously written
      // LiveRange.  See also use below.
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();
    }
  }
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object (the LiveRange start stored by precompact() overlays
      // the mark word of the first dead object in the run)
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
// Phase 4 of mark-sweep for this space: slide every live object to the
// forwarding address stored in its mark word, reinitialize the copied
// object's mark, then set this space's top (and optionally mangle the
// now-free tail).
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
      debug_only(prev_q = q);
      q += size;
    }
#endif

    // Skip over the dense prefix: it does not move.
    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}