Mon, 21 Nov 2011 07:47:34 +0100
7110718: -XX:MarkSweepAlwaysCompactCount=0 crashes the JVM
Summary: Interpret MarkSweepAlwaysCompactCount < 1 as never do full compaction
Reviewed-by: ysr, tonyp, jmasa, johnc
duke@435 | 1 | /* |
brutisso@3290 | 2 | * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 27 | #include "gc_implementation/parallelScavenge/objectStartArray.hpp" |
stefank@2314 | 28 | #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" |
stefank@2314 | 29 | #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" |
stefank@2314 | 30 | #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp" |
stefank@2314 | 31 | #include "gc_implementation/shared/liveRange.hpp" |
stefank@2314 | 32 | #include "gc_implementation/shared/markSweep.inline.hpp" |
stefank@2314 | 33 | #include "gc_implementation/shared/spaceDecorator.hpp" |
stefank@2314 | 34 | #include "oops/oop.inline.hpp" |
duke@435 | 35 | |
// The decorator whose space is currently receiving compacted objects.
// Shared by all spaces during a serial full GC; set via the
// set_destination_decorator_* entry points and stepped forward by
// advance_destination_decorator() when a destination space fills up.
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
duke@435 | 37 | |
duke@435 | 38 | |
duke@435 | 39 | void PSMarkSweepDecorator::set_destination_decorator_tenured() { |
duke@435 | 40 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 41 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 42 | |
duke@435 | 43 | _destination_decorator = heap->old_gen()->object_mark_sweep(); |
duke@435 | 44 | } |
duke@435 | 45 | |
duke@435 | 46 | void PSMarkSweepDecorator::set_destination_decorator_perm_gen() { |
duke@435 | 47 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 48 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 49 | |
duke@435 | 50 | _destination_decorator = heap->perm_gen()->object_mark_sweep(); |
duke@435 | 51 | } |
duke@435 | 52 | |
duke@435 | 53 | void PSMarkSweepDecorator::advance_destination_decorator() { |
duke@435 | 54 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 55 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 56 | |
duke@435 | 57 | assert(_destination_decorator != NULL, "Sanity"); |
duke@435 | 58 | guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator"); |
duke@435 | 59 | |
duke@435 | 60 | PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep(); |
duke@435 | 61 | PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep(); |
duke@435 | 62 | PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep(); |
duke@435 | 63 | PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep(); |
duke@435 | 64 | |
duke@435 | 65 | if ( _destination_decorator == first ) { |
duke@435 | 66 | _destination_decorator = second; |
duke@435 | 67 | } else if ( _destination_decorator == second ) { |
duke@435 | 68 | _destination_decorator = third; |
duke@435 | 69 | } else if ( _destination_decorator == third ) { |
duke@435 | 70 | _destination_decorator = fourth; |
duke@435 | 71 | } else { |
duke@435 | 72 | fatal("PSMarkSweep attempting to advance past last compaction area"); |
duke@435 | 73 | } |
duke@435 | 74 | } |
duke@435 | 75 | |
duke@435 | 76 | PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() { |
duke@435 | 77 | assert(_destination_decorator != NULL, "Sanity"); |
duke@435 | 78 | |
duke@435 | 79 | return _destination_decorator; |
duke@435 | 80 | } |
duke@435 | 81 | |
// FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
//
// Walks every object between space()->bottom() and space()->top():
//  - live objects are assigned a forwarding address at the current destination
//    decorator's compaction top (advancing to the next destination space when
//    one fills up);
//  - runs of dead objects may be left in place ("deadspace") up to a budget
//    controlled by allowed_dead_ratio(), or are recorded as gaps via LiveRange
//    records overlaid on the first dead object's header.
// On exit, _end_of_live and _first_dead describe the live layout for the
// adjust_pointers() and compact() phases.

void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally, we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
   * performance improvement!
   */
  // MarkSweepAlwaysCompactCount < 1 means "never force a full compaction";
  // checking it first also avoids a modulo-by-zero (JDK-7110718).
  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
    || ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  // Word budget of dead space we may leave uncompacted in this space.
  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = allowed_dead_ratio();
    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
  }

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord* end_of_live= q;    /* One byte beyond the last byte of the last
                                  live object. */
  HeapWord* first_dead = space()->end(); /* The first dead object. */
  LiveRange* liveRange = NULL; /* The current live range, recorded in the
                                  first header of preceding free area. */
  _first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  while (q < t) {
    assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen if a space in the young gen overflows the
      // old gen. If that should happen, we null out the start_array, because
      // the young spaces are not covered by one.
      // NOTE(review): the nulling mentioned above is not visible here; the
      // young-space decorators presumably return a NULL start_array() —
      // confirm against PSMarkSweepDecorator construction.
      while(size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size =
          pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // if the object isn't moving we can just set the mark to the default
        // mark and handle it specially later on.
        oop(q)->init_mark();
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }

      // Update object start array
      if (start_array) {
        start_array->allocate_block(compact_top);
      }

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
      compact_top += size;
      assert(compact_top <= dest->space()->end(),
        "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      // Deadspace may only be inserted while nothing has moved yet
      // (q == compact_top), i.e. in the dense prefix of this space.
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          // The dead run is now a marked filler object; forward it exactly
          // like a live object (duplicated logic — see FIX ME above).
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen if a space in the young gen overflows the
          // old gen. If that should happen, we null out the start_array, because
          // the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size =
              pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // if the object isn't moving we can just set the mark to the default
            // mark and handle it specially later on.
            oop(q)->init_mark();
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          // Update object start array
          if (start_array) {
            start_array->allocate_block(compact_top);
          }

          VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
            "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      /* for the previous LiveRange, record the end of the live objects. */
      if (liveRange) {
        liveRange->set_end(q);
      }

      /* record the current LiveRange object.
       * liveRange->start() is overlaid on the mark word.
       */
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  // Publish the live layout for the later phases.
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}
duke@435 | 282 | |
jcoomes@873 | 283 | bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words, |
jcoomes@873 | 284 | HeapWord* q, size_t deadlength) { |
jcoomes@873 | 285 | if (allowed_deadspace_words >= deadlength) { |
jcoomes@873 | 286 | allowed_deadspace_words -= deadlength; |
jcoomes@916 | 287 | CollectedHeap::fill_with_object(q, deadlength); |
jcoomes@916 | 288 | oop(q)->set_mark(oop(q)->mark()->set_marked()); |
jcoomes@916 | 289 | assert((int) deadlength == oop(q)->size(), "bad filler object size"); |
duke@435 | 290 | // Recall that we required "q == compaction_top". |
duke@435 | 291 | return true; |
duke@435 | 292 | } else { |
duke@435 | 293 | allowed_deadspace_words = 0; |
duke@435 | 294 | return false; |
duke@435 | 295 | } |
duke@435 | 296 | } |
duke@435 | 297 | |
// Phase 3 of the serial full GC for this space: visit every live object and
// rewrite its interior oops to the forwarded locations computed by
// precompact(). Objects in the non-moving dense prefix had their marks
// reinitialized (not forwarded), so they need a separate traversal that does
// not rely on is_gc_marked(); dead gaps after the prefix are skipped via the
// next-live pointer that precompact() stored in each gap's first mark word.
void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = _first_dead;

    while (q < end) {
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky. Using this to read the previously written
      // LiveRange. See also use below.
      // (The first dead object's mark word encodes the address of the next
      // live object — written by precompact()'s LiveRange bookkeeping.)
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();
    }
  }
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
duke@435 | 357 | |
// Phase 4 of the serial full GC for this space: slide every live object to the
// forwarding address stored in its mark word, reinitialize the copied object's
// mark, and finally set this space's top to its (already-computed) compaction
// top. The non-moving dense prefix is skipped (it is only walked under ASSERT
// for validation); dead gaps are skipped via the next-live pointer encoded in
// their first mark word. Optionally mangles the freed tail of the space.
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord* q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      // (Reads the next-live pointer that precompact() stored in the first
      // dead object's mark word; see the matching read in adjust_pointers().)
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  // The space's new top was accumulated during precompact() as this
  // decorator's compaction_top.
  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}