Wed, 09 Jul 2008 15:08:55 -0700
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
Summary: Maintain a high water mark for the allocations in a space and mangle only up to that high water mark.
Reviewed-by: ysr, apetrusenko
1 /*
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_psMarkSweepDecorator.cpp.incl"
// The decorator currently receiving compacted objects. Shared by all spaces
// during a collection; installed by set_destination_decorator_tenured() /
// set_destination_decorator_perm_gen() and advanced by
// advance_destination_decorator() when the current destination fills up.
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
31 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
32 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
33 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
35 _destination_decorator = heap->old_gen()->object_mark_sweep();
36 }
38 void PSMarkSweepDecorator::set_destination_decorator_perm_gen() {
39 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
40 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
42 _destination_decorator = heap->perm_gen()->object_mark_sweep();
43 }
45 void PSMarkSweepDecorator::advance_destination_decorator() {
46 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
47 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
49 assert(_destination_decorator != NULL, "Sanity");
50 guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator");
52 PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
53 PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
54 PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
55 PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
57 if ( _destination_decorator == first ) {
58 _destination_decorator = second;
59 } else if ( _destination_decorator == second ) {
60 _destination_decorator = third;
61 } else if ( _destination_decorator == third ) {
62 _destination_decorator = fourth;
63 } else {
64 fatal("PSMarkSweep attempting to advance past last compaction area");
65 }
66 }
68 PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
69 assert(_destination_decorator != NULL, "Sanity");
71 return _destination_decorator;
72 }
// FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
//
// Side effects recorded for the later phases:
//   _end_of_live  - one past the last live (or fake-live) word scanned,
//   _first_dead   - address of the first dead object (space()->end() if none),
//   dead gaps     - a LiveRange is written over each dead gap's first words,
//                   recording where the next run of live objects starts/ends,
//   dest          - its compaction_top is advanced past all forwarded objects.
void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally, we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
   * performance improvement!
   */
  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  // Deadspace budget, in words: allowed_dead_ratio() percent of capacity.
  ssize_t allowed_deadspace = 0;
  if (skip_dead) {
    int ratio = allowed_dead_ratio();
    allowed_deadspace = (space()->capacity_in_bytes() * ratio / 100) / HeapWordSize;
  }

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord*  end_of_live = q;    /* One byte beyond the last byte of the last
                                    live object. */
  HeapWord*  first_dead = space()->end(); /* The first dead object. */
  LiveRange* liveRange  = NULL;  /* The current live range, recorded in the
                                    first header of preceding free area. */
  _first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  // Linear scan over every object in [bottom, top).
  while (q < t) {
    assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen if a space in the young gen overflows the
      // old gen. If that should happen, we null out the start_array, because
      // the young spaces are not covered by one.
      while(size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size =
          pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // Don't clear the mark since it confuses parallel old
        // verification.
        if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
          // if the object isn't moving we can just set the mark to the default
          // mark and handle it specially later on.
          oop(q)->init_mark();
        }
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }

      // Update object start array
      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
        if (start_array)
          start_array->allocate_block(compact_top);
      }

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
      compact_top += size;
      assert(compact_top <= dest->space()->end(),
        "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          // The dead gap is now formatted as a fake live object; forward it
          // exactly as a live object would be (duplicated logic, see FIX ME).
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen if a space in the young gen overflows the
          // old gen. If that should happen, we null out the start_array, because
          // the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size =
              pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // Don't clear the mark since it confuses parallel old
            // verification.
            if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
              // if the object isn't moving we can just set the mark to the
              // default mark and handle it specially later on.
              oop(q)->init_mark();
            }
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          // Update object start array
          if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
            if (start_array)
              start_array->allocate_block(compact_top);
          }

          VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
            "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      /* for the previous LiveRange, record the end of the live objects. */
      if (liveRange) {
        liveRange->set_end(q);
      }

      /* record the current LiveRange object.
       * liveRange->start() is overlaid on the mark word.
       */
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  // Close out the final LiveRange, if any.
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}
284 bool PSMarkSweepDecorator::insert_deadspace(ssize_t& allowed_deadspace_words,
285 HeapWord* q, size_t deadlength) {
286 allowed_deadspace_words -= deadlength;
287 if (allowed_deadspace_words >= 0) {
288 oop(q)->set_mark(markOopDesc::prototype()->set_marked());
289 const size_t aligned_min_int_array_size =
290 align_object_size(typeArrayOopDesc::header_size(T_INT));
291 if (deadlength >= aligned_min_int_array_size) {
292 oop(q)->set_klass(Universe::intArrayKlassObj());
293 assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
294 "deadspace too big for Arrayoop");
295 typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
296 * (HeapWordSize/sizeof(jint))));
297 } else {
298 assert((int) deadlength == instanceOopDesc::header_size(),
299 "size for smallest fake dead object doesn't match");
300 oop(q)->set_klass(SystemDictionary::object_klass());
301 }
302 assert((int) deadlength == oop(q)->size(),
303 "make sure size for fake dead object match");
304 // Recall that we required "q == compaction_top".
305 return true;
306 } else {
307 allowed_deadspace_words = 0;
308 return false;
309 }
310 }
// Walk the live objects in this space and update every interior oop to the
// forwarded (post-compaction) location of its referent.
// Used by MarkSweep::mark_sweep_phase3(). Relies on _first_dead and
// _end_of_live as recorded by precompact().
void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by precompact().

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = _first_dead;

    while (q < end) {
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky. Using this to read the previously written
      // LiveRange. See also use below.
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();
    }
  }
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
// Slide every live object in this space to its forwarded destination and
// reinitialize its mark word, then set the space's top to the new
// compaction_top and optionally mangle the freed tail.
// Used by MarkSweep::mark_sweep_phase4(). Relies on _first_dead and
// _end_of_live as recorded by precompact().
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord*       q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky: the mark word of the first dead object holds the address
      // of the next live object (see the LiveRange handling in precompact()).
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}