Sat, 19 Jul 2008 17:38:22 -0400
6716785: implicit null checks not triggering with CompressedOops
Summary: allocate alignment-sized page(s) below the java heap so that memory accesses at heap_base+1page raise a signal and cause an implicit null check
Reviewed-by: kvn, jmasa, phh, jcoomes
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_psParallelCompact.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | #include <math.h> |
duke@435 | 29 | |
duke@435 | 30 | // All sizes are in HeapWords. |
duke@435 | 31 | const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words |
duke@435 | 32 | const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize; |
duke@435 | 33 | const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize; |
duke@435 | 34 | const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1; |
duke@435 | 35 | const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1; |
duke@435 | 36 | const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask; |
duke@435 | 37 | |
duke@435 | 38 | // 32-bit: 128 words covers 4 bitmap words |
duke@435 | 39 | // 64-bit: 128 words covers 2 bitmap words |
duke@435 | 40 | const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words |
duke@435 | 41 | const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize; |
duke@435 | 42 | const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1; |
duke@435 | 43 | const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask; |
duke@435 | 44 | |
duke@435 | 45 | const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize; |
duke@435 | 46 | |
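(Illustration, editorial addition, not part of the source: a minimal
standalone sketch of how these constants decompose a byte address, assuming
a 64-bit build where LogHeapWordSize is 3. All values are hypothetical.)

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t LogHeapWordSize     = 3;                            // 8-byte HeapWord
      const size_t Log2ChunkSize       = 9;                            // 512 words
      const size_t ChunkSize           = (size_t)1 << Log2ChunkSize;
      const size_t ChunkSizeBytes      = ChunkSize << LogHeapWordSize; // 4096 bytes
      const size_t ChunkAddrOffsetMask = ChunkSizeBytes - 1;
      const size_t ChunkAddrMask       = ~ChunkAddrOffsetMask;

      const size_t addr = 0x12345678;  // hypothetical byte address
      printf("chunk base  = 0x%zx\n", addr & ChunkAddrMask);       // 0x12345000
      printf("byte offset = 0x%zx\n", addr & ChunkAddrOffsetMask); // 0x678
      return 0;
    }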
duke@435 | 47 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 48 | ParallelCompactData::ChunkData::dc_shift = 27; |
duke@435 | 49 | |
duke@435 | 50 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 51 | ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift; |
duke@435 | 52 | |
duke@435 | 53 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 54 | ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift; |
duke@435 | 55 | |
duke@435 | 56 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 57 | ParallelCompactData::ChunkData::los_mask = ~dc_mask; |
duke@435 | 58 | |
duke@435 | 59 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 60 | ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift; |
duke@435 | 61 | |
duke@435 | 62 | const ParallelCompactData::ChunkData::chunk_sz_t |
duke@435 | 63 | ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift; |
duke@435 | 64 | |
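(Illustration, editorial addition: the destination count is stored in the
high bits of a chunk's dc_and_los field as multiples of dc_one, with
dc_claimed and dc_completed as sentinel values above any real count; the
low 27 bits (los_mask) hold the live-object size in words. A minimal
sketch, assuming chunk_sz_t is a 32-bit unsigned type.)

    #include <cstdint>
    #include <cstdio>

    int main() {
      typedef uint32_t chunk_sz_t;                 // assumed width
      const chunk_sz_t dc_shift = 27;
      const chunk_sz_t dc_mask  = ~0U << dc_shift;
      const chunk_sz_t dc_one   = 0x1U << dc_shift;
      const chunk_sz_t los_mask = ~dc_mask;

      chunk_sz_t dc_and_los = 2 * dc_one + 300;    // count 2, 300 live words
      printf("dest count = %u\n", dc_and_los >> dc_shift);  // 2
      printf("live words = %u\n", dc_and_los & los_mask);   // 300
      return 0;
    }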
duke@435 | 65 | #ifdef ASSERT |
duke@435 | 66 | short ParallelCompactData::BlockData::_cur_phase = 0; |
duke@435 | 67 | #endif |
duke@435 | 68 | |
duke@435 | 69 | SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id]; |
duke@435 | 70 | bool PSParallelCompact::_print_phases = false; |
duke@435 | 71 | |
duke@435 | 72 | ReferenceProcessor* PSParallelCompact::_ref_processor = NULL; |
duke@435 | 73 | klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL; |
duke@435 | 74 | |
duke@435 | 75 | double PSParallelCompact::_dwl_mean; |
duke@435 | 76 | double PSParallelCompact::_dwl_std_dev; |
duke@435 | 77 | double PSParallelCompact::_dwl_first_term; |
duke@435 | 78 | double PSParallelCompact::_dwl_adjustment; |
duke@435 | 79 | #ifdef ASSERT |
duke@435 | 80 | bool PSParallelCompact::_dwl_initialized = false; |
duke@435 | 81 | #endif // #ifdef ASSERT |
duke@435 | 82 | |
duke@435 | 83 | #ifdef VALIDATE_MARK_SWEEP |
coleenp@548 | 84 | GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL; |
duke@435 | 85 | GrowableArray<oop> * PSParallelCompact::_live_oops = NULL; |
duke@435 | 86 | GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL; |
duke@435 | 87 | GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL; |
duke@435 | 88 | size_t PSParallelCompact::_live_oops_index = 0; |
duke@435 | 89 | size_t PSParallelCompact::_live_oops_index_at_perm = 0; |
coleenp@548 | 90 | GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL; |
coleenp@548 | 91 | GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL; |
duke@435 | 92 | bool PSParallelCompact::_pointer_tracking = false; |
duke@435 | 93 | bool PSParallelCompact::_root_tracking = true; |
duke@435 | 94 | |
duke@435 | 95 | GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL; |
duke@435 | 96 | GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL; |
duke@435 | 97 | GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL; |
duke@435 | 98 | GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL; |
duke@435 | 99 | GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL; |
duke@435 | 100 | GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL; |
duke@435 | 101 | #endif |
duke@435 | 102 | |
duke@435 | 103 | // XXX beg - verification code; only works while we also mark in object headers |
duke@435 | 104 | static void |
duke@435 | 105 | verify_mark_bitmap(ParMarkBitMap& _mark_bitmap) |
duke@435 | 106 | { |
duke@435 | 107 | ParallelScavengeHeap* heap = PSParallelCompact::gc_heap(); |
duke@435 | 108 | |
duke@435 | 109 | PSPermGen* perm_gen = heap->perm_gen(); |
duke@435 | 110 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 111 | PSYoungGen* young_gen = heap->young_gen(); |
duke@435 | 112 | |
duke@435 | 113 | MutableSpace* perm_space = perm_gen->object_space(); |
duke@435 | 114 | MutableSpace* old_space = old_gen->object_space(); |
duke@435 | 115 | MutableSpace* eden_space = young_gen->eden_space(); |
duke@435 | 116 | MutableSpace* from_space = young_gen->from_space(); |
duke@435 | 117 | MutableSpace* to_space = young_gen->to_space(); |
duke@435 | 118 | |
duke@435 | 119 | // 'from_space' here is the survivor space at the lower address. |
duke@435 | 120 | if (to_space->bottom() < from_space->bottom()) { |
duke@435 | 121 | from_space = to_space; |
duke@435 | 122 | to_space = young_gen->from_space(); |
duke@435 | 123 | } |
duke@435 | 124 | |
duke@435 | 125 | HeapWord* boundaries[12]; |
duke@435 | 126 | unsigned int bidx = 0; |
duke@435 | 127 | const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]); |
duke@435 | 128 | |
duke@435 | 129 | boundaries[0] = perm_space->bottom(); |
duke@435 | 130 | boundaries[1] = perm_space->top(); |
duke@435 | 131 | boundaries[2] = old_space->bottom(); |
duke@435 | 132 | boundaries[3] = old_space->top(); |
duke@435 | 133 | boundaries[4] = eden_space->bottom(); |
duke@435 | 134 | boundaries[5] = eden_space->top(); |
duke@435 | 135 | boundaries[6] = from_space->bottom(); |
duke@435 | 136 | boundaries[7] = from_space->top(); |
duke@435 | 137 | boundaries[8] = to_space->bottom(); |
duke@435 | 138 | boundaries[9] = to_space->top(); |
duke@435 | 139 | boundaries[10] = to_space->end(); |
duke@435 | 140 | boundaries[11] = to_space->end(); |
duke@435 | 141 | |
duke@435 | 142 | BitMap::idx_t beg_bit = 0; |
duke@435 | 143 | BitMap::idx_t end_bit; |
duke@435 | 144 | BitMap::idx_t tmp_bit; |
duke@435 | 145 | const BitMap::idx_t last_bit = _mark_bitmap.size(); |
duke@435 | 146 | do { |
duke@435 | 147 | HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit); |
duke@435 | 148 | if (_mark_bitmap.is_marked(beg_bit)) { |
duke@435 | 149 | oop obj = (oop)addr; |
duke@435 | 150 | assert(obj->is_gc_marked(), "obj header is not marked"); |
duke@435 | 151 | end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit); |
duke@435 | 152 | const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit); |
duke@435 | 153 | assert(size == (size_t)obj->size(), "end bit wrong?"); |
duke@435 | 154 | beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit); |
duke@435 | 155 | assert(beg_bit > end_bit, "bit set in middle of an obj"); |
duke@435 | 156 | } else { |
duke@435 | 157 | if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) { |
duke@435 | 158 | // a dead object in the current space. |
duke@435 | 159 | oop obj = (oop)addr; |
duke@435 | 160 | end_bit = _mark_bitmap.addr_to_bit(addr + obj->size()); |
duke@435 | 161 | assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap"); |
duke@435 | 162 | tmp_bit = beg_bit + 1; |
duke@435 | 163 | beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit); |
duke@435 | 164 | assert(beg_bit == end_bit, "beg bit set in unmarked obj"); |
duke@435 | 165 | beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit); |
duke@435 | 166 | assert(beg_bit == end_bit, "end bit set in unmarked obj"); |
duke@435 | 167 | } else if (addr < boundaries[bidx + 2]) { |
duke@435 | 168 | // addr is between top in the current space and bottom in the next. |
duke@435 | 169 | end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr); |
duke@435 | 170 | tmp_bit = beg_bit; |
duke@435 | 171 | beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit); |
duke@435 | 172 | assert(beg_bit == end_bit, "beg bit set above top"); |
duke@435 | 173 | beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit); |
duke@435 | 174 | assert(beg_bit == end_bit, "end bit set above top"); |
duke@435 | 175 | bidx += 2; |
duke@435 | 176 | } else if (bidx < bidx_max - 2) { |
duke@435 | 177 | bidx += 2; // ??? |
duke@435 | 178 | } else { |
duke@435 | 179 | tmp_bit = beg_bit; |
duke@435 | 180 | beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit); |
duke@435 | 181 | assert(beg_bit == last_bit, "beg bit set outside heap"); |
duke@435 | 182 | beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit); |
duke@435 | 183 | assert(beg_bit == last_bit, "end bit set outside heap"); |
duke@435 | 184 | } |
duke@435 | 185 | } |
duke@435 | 186 | } while (beg_bit < last_bit); |
duke@435 | 187 | } |
duke@435 | 188 | // XXX end - verification code; only works while we also mark in object headers |
duke@435 | 189 | |
duke@435 | 190 | #ifndef PRODUCT |
duke@435 | 191 | const char* PSParallelCompact::space_names[] = { |
duke@435 | 192 | "perm", "old ", "eden", "from", "to " |
duke@435 | 193 | }; |
duke@435 | 194 | |
duke@435 | 195 | void PSParallelCompact::print_chunk_ranges() |
duke@435 | 196 | { |
duke@435 | 197 | tty->print_cr("space bottom top end new_top"); |
duke@435 | 198 | tty->print_cr("------ ---------- ---------- ---------- ----------"); |
duke@435 | 199 | |
duke@435 | 200 | for (unsigned int id = 0; id < last_space_id; ++id) { |
duke@435 | 201 | const MutableSpace* space = _space_info[id].space(); |
duke@435 | 202 | tty->print_cr("%u %s " |
duke@435 | 203 | SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " " |
duke@435 | 204 | SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ", |
duke@435 | 205 | id, space_names[id], |
duke@435 | 206 | summary_data().addr_to_chunk_idx(space->bottom()), |
duke@435 | 207 | summary_data().addr_to_chunk_idx(space->top()), |
duke@435 | 208 | summary_data().addr_to_chunk_idx(space->end()), |
duke@435 | 209 | summary_data().addr_to_chunk_idx(_space_info[id].new_top())); |
duke@435 | 210 | } |
duke@435 | 211 | } |
duke@435 | 212 | |
duke@435 | 213 | void |
duke@435 | 214 | print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c) |
duke@435 | 215 | { |
duke@435 | 216 | #define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7") |
duke@435 | 217 | #define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5") |
duke@435 | 218 | |
duke@435 | 219 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 220 | size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0; |
duke@435 | 221 | tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " " |
duke@435 | 222 | CHUNK_IDX_FORMAT " " PTR_FORMAT " " |
duke@435 | 223 | CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " " |
duke@435 | 224 | CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d", |
duke@435 | 225 | i, c->data_location(), dci, c->destination(), |
duke@435 | 226 | c->partial_obj_size(), c->live_obj_size(), |
duke@435 | 227 | c->data_size(), c->source_chunk(), c->destination_count()); |
duke@435 | 228 | |
duke@435 | 229 | #undef CHUNK_IDX_FORMAT |
duke@435 | 230 | #undef CHUNK_DATA_FORMAT |
duke@435 | 231 | } |
duke@435 | 232 | |
duke@435 | 233 | void |
duke@435 | 234 | print_generic_summary_data(ParallelCompactData& summary_data, |
duke@435 | 235 | HeapWord* const beg_addr, |
duke@435 | 236 | HeapWord* const end_addr) |
duke@435 | 237 | { |
duke@435 | 238 | size_t total_words = 0; |
duke@435 | 239 | size_t i = summary_data.addr_to_chunk_idx(beg_addr); |
duke@435 | 240 | const size_t last = summary_data.addr_to_chunk_idx(end_addr); |
duke@435 | 241 | HeapWord* pdest = 0; |
duke@435 | 242 | |
duke@435 | 243 | while (i <= last) { |
duke@435 | 244 | ParallelCompactData::ChunkData* c = summary_data.chunk(i); |
duke@435 | 245 | if (c->data_size() != 0 || c->destination() != pdest) { |
duke@435 | 246 | print_generic_summary_chunk(i, c); |
duke@435 | 247 | total_words += c->data_size(); |
duke@435 | 248 | pdest = c->destination(); |
duke@435 | 249 | } |
duke@435 | 250 | ++i; |
duke@435 | 251 | } |
duke@435 | 252 | |
duke@435 | 253 | tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize); |
duke@435 | 254 | } |
duke@435 | 255 | |
duke@435 | 256 | void |
duke@435 | 257 | print_generic_summary_data(ParallelCompactData& summary_data, |
duke@435 | 258 | SpaceInfo* space_info) |
duke@435 | 259 | { |
duke@435 | 260 | for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) { |
duke@435 | 261 | const MutableSpace* space = space_info[id].space(); |
duke@435 | 262 | print_generic_summary_data(summary_data, space->bottom(), |
duke@435 | 263 | MAX2(space->top(), space_info[id].new_top())); |
duke@435 | 264 | } |
duke@435 | 265 | } |
duke@435 | 266 | |
duke@435 | 267 | void |
duke@435 | 268 | print_initial_summary_chunk(size_t i, |
duke@435 | 269 | const ParallelCompactData::ChunkData* c, |
duke@435 | 270 | bool newline = true) |
duke@435 | 271 | { |
duke@435 | 272 | tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " " |
duke@435 | 273 | SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " " |
duke@435 | 274 | SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d", |
duke@435 | 275 | i, c->destination(), |
duke@435 | 276 | c->partial_obj_size(), c->live_obj_size(), |
duke@435 | 277 | c->data_size(), c->source_chunk(), c->destination_count()); |
duke@435 | 278 | if (newline) tty->cr(); |
duke@435 | 279 | } |
duke@435 | 280 | |
duke@435 | 281 | void |
duke@435 | 282 | print_initial_summary_data(ParallelCompactData& summary_data, |
duke@435 | 283 | const MutableSpace* space) { |
duke@435 | 284 | if (space->top() == space->bottom()) { |
duke@435 | 285 | return; |
duke@435 | 286 | } |
duke@435 | 287 | |
duke@435 | 288 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 289 | HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top()); |
duke@435 | 290 | const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up); |
duke@435 | 291 | const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1); |
duke@435 | 292 | HeapWord* end_addr = c->destination() + c->data_size(); |
duke@435 | 293 | const size_t live_in_space = pointer_delta(end_addr, space->bottom()); |
duke@435 | 294 | |
duke@435 | 295 | // Print (and count) the full chunks at the beginning of the space. |
duke@435 | 296 | size_t full_chunk_count = 0; |
duke@435 | 297 | size_t i = summary_data.addr_to_chunk_idx(space->bottom()); |
duke@435 | 298 | while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) { |
duke@435 | 299 | print_initial_summary_chunk(i, summary_data.chunk(i)); |
duke@435 | 300 | ++full_chunk_count; |
duke@435 | 301 | ++i; |
duke@435 | 302 | } |
duke@435 | 303 | |
duke@435 | 304 | size_t live_to_right = live_in_space - full_chunk_count * chunk_size; |
duke@435 | 305 | |
duke@435 | 306 | double max_reclaimed_ratio = 0.0; |
duke@435 | 307 | size_t max_reclaimed_ratio_chunk = 0; |
duke@435 | 308 | size_t max_dead_to_right = 0; |
duke@435 | 309 | size_t max_live_to_right = 0; |
duke@435 | 310 | |
duke@435 | 311 | // Print the 'reclaimed ratio' for chunks while there is something live in the |
duke@435 | 312 | // chunk or to the right of it. The remaining chunks are empty (and |
duke@435 | 313 | // uninteresting), and computing the ratio will result in division by 0. |
duke@435 | 314 | while (i < end_chunk && live_to_right > 0) { |
duke@435 | 315 | c = summary_data.chunk(i); |
duke@435 | 316 | HeapWord* const chunk_addr = summary_data.chunk_to_addr(i); |
duke@435 | 317 | const size_t used_to_right = pointer_delta(space->top(), chunk_addr); |
duke@435 | 318 | const size_t dead_to_right = used_to_right - live_to_right; |
duke@435 | 319 | const double reclaimed_ratio = double(dead_to_right) / live_to_right; |
duke@435 | 320 | |
duke@435 | 321 | if (reclaimed_ratio > max_reclaimed_ratio) { |
duke@435 | 322 | max_reclaimed_ratio = reclaimed_ratio; |
duke@435 | 323 | max_reclaimed_ratio_chunk = i; |
duke@435 | 324 | max_dead_to_right = dead_to_right; |
duke@435 | 325 | max_live_to_right = live_to_right; |
duke@435 | 326 | } |
duke@435 | 327 | |
duke@435 | 328 | print_initial_summary_chunk(i, c, false); |
duke@435 | 329 | tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"), |
duke@435 | 330 | reclaimed_ratio, dead_to_right, live_to_right); |
duke@435 | 331 | |
duke@435 | 332 | live_to_right -= c->data_size(); |
duke@435 | 333 | ++i; |
duke@435 | 334 | } |
duke@435 | 335 | |
duke@435 | 336 | // Any remaining chunks are empty. Print one more if there is one. |
duke@435 | 337 | if (i < end_chunk) { |
duke@435 | 338 | print_initial_summary_chunk(i, summary_data.chunk(i)); |
duke@435 | 339 | } |
duke@435 | 340 | |
duke@435 | 341 | tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " " |
duke@435 | 342 | "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f", |
duke@435 | 343 | max_reclaimed_ratio_chunk, max_dead_to_right, |
duke@435 | 344 | max_live_to_right, max_reclaimed_ratio); |
duke@435 | 345 | } |
duke@435 | 346 | |
duke@435 | 347 | void |
duke@435 | 348 | print_initial_summary_data(ParallelCompactData& summary_data, |
duke@435 | 349 | SpaceInfo* space_info) { |
duke@435 | 350 | unsigned int id = PSParallelCompact::perm_space_id; |
duke@435 | 351 | const MutableSpace* space; |
duke@435 | 352 | do { |
duke@435 | 353 | space = space_info[id].space(); |
duke@435 | 354 | print_initial_summary_data(summary_data, space); |
duke@435 | 355 | } while (++id < PSParallelCompact::eden_space_id); |
duke@435 | 356 | |
duke@435 | 357 | do { |
duke@435 | 358 | space = space_info[id].space(); |
duke@435 | 359 | print_generic_summary_data(summary_data, space->bottom(), space->top()); |
duke@435 | 360 | } while (++id < PSParallelCompact::last_space_id); |
duke@435 | 361 | } |
duke@435 | 362 | #endif // #ifndef PRODUCT |
duke@435 | 363 | |
duke@435 | 364 | #ifdef ASSERT |
duke@435 | 365 | size_t add_obj_count; |
duke@435 | 366 | size_t add_obj_size; |
duke@435 | 367 | size_t mark_bitmap_count; |
duke@435 | 368 | size_t mark_bitmap_size; |
duke@435 | 369 | #endif // #ifdef ASSERT |
duke@435 | 370 | |
duke@435 | 371 | ParallelCompactData::ParallelCompactData() |
duke@435 | 372 | { |
duke@435 | 373 | _region_start = 0; |
duke@435 | 374 | |
duke@435 | 375 | _chunk_vspace = 0; |
duke@435 | 376 | _chunk_data = 0; |
duke@435 | 377 | _chunk_count = 0; |
duke@435 | 378 | |
duke@435 | 379 | _block_vspace = 0; |
duke@435 | 380 | _block_data = 0; |
duke@435 | 381 | _block_count = 0; |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | bool ParallelCompactData::initialize(MemRegion covered_region) |
duke@435 | 385 | { |
duke@435 | 386 | _region_start = covered_region.start(); |
duke@435 | 387 | const size_t region_size = covered_region.word_size(); |
duke@435 | 388 | DEBUG_ONLY(_region_end = _region_start + region_size;) |
duke@435 | 389 | |
duke@435 | 390 | assert(chunk_align_down(_region_start) == _region_start, |
duke@435 | 391 | "region start not aligned"); |
duke@435 | 392 | assert((region_size & ChunkSizeOffsetMask) == 0, |
duke@435 | 393 | "region size not a multiple of ChunkSize"); |
duke@435 | 394 | |
duke@435 | 395 | bool result = initialize_chunk_data(region_size); |
duke@435 | 396 | |
duke@435 | 397 | // Initialize the block data if it will be used for updating pointers, or if |
duke@435 | 398 | // this is a debug build. |
duke@435 | 399 | if (!UseParallelOldGCChunkPointerCalc || trueInDebug) { |
duke@435 | 400 | result = result && initialize_block_data(region_size); |
duke@435 | 401 | } |
duke@435 | 402 | |
duke@435 | 403 | return result; |
duke@435 | 404 | } |
duke@435 | 405 | |
duke@435 | 406 | PSVirtualSpace* |
duke@435 | 407 | ParallelCompactData::create_vspace(size_t count, size_t element_size) |
duke@435 | 408 | { |
duke@435 | 409 | const size_t raw_bytes = count * element_size; |
duke@435 | 410 | const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10); |
duke@435 | 411 | const size_t granularity = os::vm_allocation_granularity(); |
duke@435 | 412 | const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity)); |
duke@435 | 413 | |
duke@435 | 414 | const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : |
duke@435 | 415 | MAX2(page_sz, granularity); |
jcoomes@514 | 416 | ReservedSpace rs(bytes, rs_align, rs_align > 0); |
duke@435 | 417 | os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(), |
duke@435 | 418 | rs.size()); |
duke@435 | 419 | PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); |
duke@435 | 420 | if (vspace != 0) { |
duke@435 | 421 | if (vspace->expand_by(bytes)) { |
duke@435 | 422 | return vspace; |
duke@435 | 423 | } |
duke@435 | 424 | delete vspace; |
coleenp@672 | 425 | // Release memory reserved in the space. |
coleenp@672 | 426 | rs.release(); |
duke@435 | 427 | } |
duke@435 | 428 | |
duke@435 | 429 | return 0; |
duke@435 | 430 | } |
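(Illustration, editorial addition: the sizing above rounds the raw table
size up to the larger of the page size and the allocation granularity. A
standalone sketch with hypothetical sizes.)

    #include <cstddef>
    #include <cstdio>

    // Round sz up to a multiple of align (align must be a power of two).
    static size_t align_size_up(size_t sz, size_t align) {
      return (sz + align - 1) & ~(align - 1);
    }

    int main() {
      const size_t page_sz     = 4096;     // hypothetical page size
      const size_t granularity = 65536;    // hypothetical allocation granularity
      const size_t raw_bytes   = 100000;   // count * element_size
      const size_t align       = page_sz > granularity ? page_sz : granularity;
      printf("reserved bytes = %zu\n", align_size_up(raw_bytes, align)); // 131072
      return 0;
    }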
duke@435 | 431 | |
duke@435 | 432 | bool ParallelCompactData::initialize_chunk_data(size_t region_size) |
duke@435 | 433 | { |
duke@435 | 434 | const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize; |
duke@435 | 435 | _chunk_vspace = create_vspace(count, sizeof(ChunkData)); |
duke@435 | 436 | if (_chunk_vspace != 0) { |
duke@435 | 437 | _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr(); |
duke@435 | 438 | _chunk_count = count; |
duke@435 | 439 | return true; |
duke@435 | 440 | } |
duke@435 | 441 | return false; |
duke@435 | 442 | } |
duke@435 | 443 | |
duke@435 | 444 | bool ParallelCompactData::initialize_block_data(size_t region_size) |
duke@435 | 445 | { |
duke@435 | 446 | const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize; |
duke@435 | 447 | _block_vspace = create_vspace(count, sizeof(BlockData)); |
duke@435 | 448 | if (_block_vspace != 0) { |
duke@435 | 449 | _block_data = (BlockData*)_block_vspace->reserved_low_addr(); |
duke@435 | 450 | _block_count = count; |
duke@435 | 451 | return true; |
duke@435 | 452 | } |
duke@435 | 453 | return false; |
duke@435 | 454 | } |
duke@435 | 455 | |
duke@435 | 456 | void ParallelCompactData::clear() |
duke@435 | 457 | { |
duke@435 | 458 | if (_block_data) { |
duke@435 | 459 | memset(_block_data, 0, _block_vspace->committed_size()); |
duke@435 | 460 | } |
duke@435 | 461 | memset(_chunk_data, 0, _chunk_vspace->committed_size()); |
duke@435 | 462 | } |
duke@435 | 463 | |
duke@435 | 464 | void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) { |
duke@435 | 465 | assert(beg_chunk <= _chunk_count, "beg_chunk out of range"); |
duke@435 | 466 | assert(end_chunk <= _chunk_count, "end_chunk out of range"); |
duke@435 | 467 | assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize"); |
duke@435 | 468 | |
duke@435 | 469 | const size_t chunk_cnt = end_chunk - beg_chunk; |
duke@435 | 470 | |
duke@435 | 471 | if (_block_data) { |
duke@435 | 472 | const size_t blocks_per_chunk = ChunkSize / BlockSize; |
duke@435 | 473 | const size_t beg_block = beg_chunk * blocks_per_chunk; |
duke@435 | 474 | const size_t block_cnt = chunk_cnt * blocks_per_chunk; |
duke@435 | 475 | memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData)); |
duke@435 | 476 | } |
duke@435 | 477 | memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData)); |
duke@435 | 478 | } |
duke@435 | 479 | |
duke@435 | 480 | HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const |
duke@435 | 481 | { |
duke@435 | 482 | const ChunkData* cur_cp = chunk(chunk_idx); |
duke@435 | 483 | const ChunkData* const end_cp = chunk(chunk_count() - 1); |
duke@435 | 484 | |
duke@435 | 485 | HeapWord* result = chunk_to_addr(chunk_idx); |
duke@435 | 486 | if (cur_cp < end_cp) { |
duke@435 | 487 | do { |
duke@435 | 488 | result += cur_cp->partial_obj_size(); |
duke@435 | 489 | } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp); |
duke@435 | 490 | } |
duke@435 | 491 | return result; |
duke@435 | 492 | } |
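(Worked example, editorial addition with made-up numbers: if an object
spills all 512 words across chunk k -- partial_obj_size(k) == ChunkSize --
and 64 words into chunk k+1, the loop above adds 512, advances, adds 64,
and stops, returning chunk_to_addr(k) + 576: the first word past the
spilled object.)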
duke@435 | 493 | |
duke@435 | 494 | void ParallelCompactData::add_obj(HeapWord* addr, size_t len) |
duke@435 | 495 | { |
duke@435 | 496 | const size_t obj_ofs = pointer_delta(addr, _region_start); |
duke@435 | 497 | const size_t beg_chunk = obj_ofs >> Log2ChunkSize; |
duke@435 | 498 | const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize; |
duke@435 | 499 | |
duke@435 | 500 | DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);) |
duke@435 | 501 | DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);) |
duke@435 | 502 | |
duke@435 | 503 | if (beg_chunk == end_chunk) { |
duke@435 | 504 | // All in one chunk. |
duke@435 | 505 | _chunk_data[beg_chunk].add_live_obj(len); |
duke@435 | 506 | return; |
duke@435 | 507 | } |
duke@435 | 508 | |
duke@435 | 509 | // First chunk. |
duke@435 | 510 | const size_t beg_ofs = chunk_offset(addr); |
duke@435 | 511 | _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs); |
duke@435 | 512 | |
duke@435 | 513 | klassOop klass = ((oop)addr)->klass(); |
duke@435 | 514 | // Middle chunks--completely spanned by this object. |
duke@435 | 515 | for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) { |
duke@435 | 516 | _chunk_data[chunk].set_partial_obj_size(ChunkSize); |
duke@435 | 517 | _chunk_data[chunk].set_partial_obj_addr(addr); |
duke@435 | 518 | } |
duke@435 | 519 | |
duke@435 | 520 | // Last chunk. |
duke@435 | 521 | const size_t end_ofs = chunk_offset(addr + len - 1); |
duke@435 | 522 | _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1); |
duke@435 | 523 | _chunk_data[end_chunk].set_partial_obj_addr(addr); |
duke@435 | 524 | } |
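(Illustration, editorial addition: a standalone sketch of the splitting
arithmetic above, with made-up numbers. An 1100-word object starting 500
words into the region touches chunks 0 through 3, and its pieces sum back
to its length: 12 + 512 + 512 + 64 == 1100.)

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t Log2ChunkSize = 9;
      const size_t ChunkSize     = (size_t)1 << Log2ChunkSize;
      const size_t obj_ofs = 500, len = 1100;                        // hypothetical
      const size_t beg_chunk = obj_ofs >> Log2ChunkSize;             // 0
      const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize; // 3
      const size_t beg_ofs   = obj_ofs & (ChunkSize - 1);            // 500
      const size_t end_ofs   = (obj_ofs + len - 1) & (ChunkSize - 1);// 63

      printf("chunk %zu: live_obj   += %zu\n", beg_chunk, ChunkSize - beg_ofs);
      for (size_t c = beg_chunk + 1; c < end_chunk; ++c) {
        printf("chunk %zu: partial_obj = %zu\n", c, ChunkSize);
      }
      printf("chunk %zu: partial_obj = %zu\n", end_chunk, end_ofs + 1);
      return 0;
    }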
duke@435 | 525 | |
duke@435 | 526 | void |
duke@435 | 527 | ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end) |
duke@435 | 528 | { |
duke@435 | 529 | assert(chunk_offset(beg) == 0, "not ChunkSize aligned"); |
duke@435 | 530 | assert(chunk_offset(end) == 0, "not ChunkSize aligned"); |
duke@435 | 531 | |
duke@435 | 532 | size_t cur_chunk = addr_to_chunk_idx(beg); |
duke@435 | 533 | const size_t end_chunk = addr_to_chunk_idx(end); |
duke@435 | 534 | HeapWord* addr = beg; |
duke@435 | 535 | while (cur_chunk < end_chunk) { |
duke@435 | 536 | _chunk_data[cur_chunk].set_destination(addr); |
duke@435 | 537 | _chunk_data[cur_chunk].set_destination_count(0); |
duke@435 | 538 | _chunk_data[cur_chunk].set_source_chunk(cur_chunk); |
duke@435 | 539 | _chunk_data[cur_chunk].set_data_location(addr); |
duke@435 | 540 | |
duke@435 | 541 | // Update live_obj_size so the chunk appears completely full. |
duke@435 | 542 | size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size(); |
duke@435 | 543 | _chunk_data[cur_chunk].set_live_obj_size(live_size); |
duke@435 | 544 | |
duke@435 | 545 | ++cur_chunk; |
duke@435 | 546 | addr += ChunkSize; |
duke@435 | 547 | } |
duke@435 | 548 | } |
duke@435 | 549 | |
duke@435 | 550 | bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end, |
duke@435 | 551 | HeapWord* source_beg, HeapWord* source_end, |
duke@435 | 552 | HeapWord** target_next, |
duke@435 | 553 | HeapWord** source_next) { |
duke@435 | 554 | // This is too strict. |
duke@435 | 555 | // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned"); |
duke@435 | 556 | |
duke@435 | 557 | if (TraceParallelOldGCSummaryPhase) { |
duke@435 | 558 | tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " " |
duke@435 | 559 | "sb=" PTR_FORMAT " se=" PTR_FORMAT " " |
duke@435 | 560 | "tn=" PTR_FORMAT " sn=" PTR_FORMAT, |
duke@435 | 561 | target_beg, target_end, |
duke@435 | 562 | source_beg, source_end, |
duke@435 | 563 | target_next != 0 ? *target_next : (HeapWord*) 0, |
duke@435 | 564 | source_next != 0 ? *source_next : (HeapWord*) 0); |
duke@435 | 565 | } |
duke@435 | 566 | |
duke@435 | 567 | size_t cur_chunk = addr_to_chunk_idx(source_beg); |
duke@435 | 568 | const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end)); |
duke@435 | 569 | |
duke@435 | 570 | HeapWord *dest_addr = target_beg; |
duke@435 | 571 | while (cur_chunk < end_chunk) { |
duke@435 | 572 | size_t words = _chunk_data[cur_chunk].data_size(); |
duke@435 | 573 | |
duke@435 | 574 | #if 1 |
duke@435 | 575 | assert(pointer_delta(target_end, dest_addr) >= words, |
duke@435 | 576 | "source region does not fit into target region"); |
duke@435 | 577 | #else |
duke@435 | 578 | // XXX - need some work on the corner cases here. If the chunk does not |
duke@435 | 579 | // fit, then must either make sure any partial_obj from the chunk fits, or |
duke@435 | 580 | // 'undo' the initial part of the partial_obj that is in the previous chunk. |
duke@435 | 581 | if (dest_addr + words >= target_end) { |
duke@435 | 582 | // Let the caller know where to continue. |
duke@435 | 583 | *target_next = dest_addr; |
duke@435 | 584 | *source_next = chunk_to_addr(cur_chunk); |
duke@435 | 585 | return false; |
duke@435 | 586 | } |
duke@435 | 587 | #endif // #if 1 |
duke@435 | 588 | |
duke@435 | 589 | _chunk_data[cur_chunk].set_destination(dest_addr); |
duke@435 | 590 | |
duke@435 | 591 | // Set the destination_count for cur_chunk, and if necessary, update |
duke@435 | 592 | // source_chunk for a destination chunk. The source_chunk field is updated |
duke@435 | 593 | // if cur_chunk is the first (left-most) chunk to be copied to a destination |
duke@435 | 594 | // chunk. |
duke@435 | 595 | // |
duke@435 | 596 | // The destination_count calculation is a bit subtle. A chunk that has data |
duke@435 | 597 | // that compacts into itself does not count itself as a destination. This |
duke@435 | 598 | // maintains the invariant that a zero count means the chunk is available |
duke@435 | 599 | // and can be claimed and then filled. |
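    // Worked example (editorial addition, made-up chunk numbers): if chunk 10's
    // data lands partly at the end of chunk 7 and partly at the start of chunk
    // 8, then dest_chunk_1 == 7 and dest_chunk_2 == 8, so destination_count is
    // 2 and chunk 8 records chunk 10 as its source_chunk. If instead all of
    // chunk 10's data compacts into chunk 10 itself, then dest_chunk_1 ==
    // dest_chunk_2 == cur_chunk and destination_count drops to 0, leaving the
    // chunk immediately claimable.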
duke@435 | 600 | if (words > 0) { |
duke@435 | 601 | HeapWord* const last_addr = dest_addr + words - 1; |
duke@435 | 602 | const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr); |
duke@435 | 603 | const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr); |
duke@435 | 604 | #if 0 |
duke@435 | 605 | // Initially assume that the destination chunks will be the same and |
duke@435 | 606 | // adjust the value below if necessary. Under this assumption, if |
duke@435 | 607 | // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely |
duke@435 | 608 | // into itself. |
duke@435 | 609 | uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1; |
duke@435 | 610 | if (dest_chunk_1 != dest_chunk_2) { |
duke@435 | 611 | // Destination chunks differ; adjust destination_count. |
duke@435 | 612 | destination_count += 1; |
duke@435 | 613 | // Data from cur_chunk will be copied to the start of dest_chunk_2. |
duke@435 | 614 | _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); |
duke@435 | 615 | } else if (chunk_offset(dest_addr) == 0) { |
duke@435 | 616 | // Data from cur_chunk will be copied to the start of the destination |
duke@435 | 617 | // chunk. |
duke@435 | 618 | _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); |
duke@435 | 619 | } |
duke@435 | 620 | #else |
duke@435 | 621 | // Initially assume that the destination chunks will be different and |
duke@435 | 622 | // adjust the value below if necessary. Under this assumption, if |
duke@435 | 623 | // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially |
duke@435 | 624 | // into dest_chunk_1 and partially into itself. |
duke@435 | 625 | uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2; |
duke@435 | 626 | if (dest_chunk_1 != dest_chunk_2) { |
duke@435 | 627 | // Data from cur_chunk will be copied to the start of dest_chunk_2. |
duke@435 | 628 | _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk); |
duke@435 | 629 | } else { |
duke@435 | 630 | // Destination chunks are the same; adjust destination_count. |
duke@435 | 631 | destination_count -= 1; |
duke@435 | 632 | if (chunk_offset(dest_addr) == 0) { |
duke@435 | 633 | // Data from cur_chunk will be copied to the start of the destination |
duke@435 | 634 | // chunk. |
duke@435 | 635 | _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk); |
duke@435 | 636 | } |
duke@435 | 637 | } |
duke@435 | 638 | #endif // #if 0 |
duke@435 | 639 | |
duke@435 | 640 | _chunk_data[cur_chunk].set_destination_count(destination_count); |
duke@435 | 641 | _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk)); |
duke@435 | 642 | dest_addr += words; |
duke@435 | 643 | } |
duke@435 | 644 | |
duke@435 | 645 | ++cur_chunk; |
duke@435 | 646 | } |
duke@435 | 647 | |
duke@435 | 648 | *target_next = dest_addr; |
duke@435 | 649 | return true; |
duke@435 | 650 | } |
duke@435 | 651 | |
duke@435 | 652 | bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) { |
duke@435 | 653 | HeapWord* block_addr = block_to_addr(block_index); |
duke@435 | 654 | HeapWord* block_end_addr = block_addr + BlockSize; |
duke@435 | 655 | size_t chunk_index = addr_to_chunk_idx(block_addr); |
duke@435 | 656 | HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index); |
duke@435 | 657 | |
duke@435 | 658 | // An object that ends at the end of the block ends |
duke@435 | 659 | // in the block (the last word of the object is to |
duke@435 | 660 | // the left of the end). |
duke@435 | 661 | if ((block_addr < partial_obj_end_addr) && |
duke@435 | 662 | (partial_obj_end_addr <= block_end_addr)) { |
duke@435 | 663 | return true; |
duke@435 | 664 | } |
duke@435 | 665 | |
duke@435 | 666 | return false; |
duke@435 | 667 | } |
duke@435 | 668 | |
duke@435 | 669 | HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) { |
duke@435 | 670 | HeapWord* result = NULL; |
duke@435 | 671 | if (UseParallelOldGCChunkPointerCalc) { |
duke@435 | 672 | result = chunk_calc_new_pointer(addr); |
duke@435 | 673 | } else { |
duke@435 | 674 | result = block_calc_new_pointer(addr); |
duke@435 | 675 | } |
duke@435 | 676 | return result; |
duke@435 | 677 | } |
duke@435 | 678 | |
duke@435 | 679 | // This method is overly complicated (expensive) to be called |
duke@435 | 680 | // for every reference. |
duke@435 | 681 | // Try to restructure this so that a NULL is returned if |
duke@435 | 682 | // the object is dead. But don't waste the cycles to explicitly check |
duke@435 | 683 | // that it is dead since only live objects should be passed in. |
duke@435 | 684 | |
duke@435 | 685 | HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) { |
duke@435 | 686 | assert(addr != NULL, "Should detect NULL oop earlier"); |
duke@435 | 687 | assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); |
duke@435 | 688 | #ifdef ASSERT |
duke@435 | 689 | if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) { |
duke@435 | 690 | gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); |
duke@435 | 691 | } |
duke@435 | 692 | #endif |
duke@435 | 693 | assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); |
duke@435 | 694 | |
duke@435 | 695 | // Chunk covering the object. |
duke@435 | 696 | size_t chunk_index = addr_to_chunk_idx(addr); |
duke@435 | 697 | const ChunkData* const chunk_ptr = chunk(chunk_index); |
duke@435 | 698 | HeapWord* const chunk_addr = chunk_align_down(addr); |
duke@435 | 699 | |
duke@435 | 700 | assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); |
duke@435 | 701 | assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); |
duke@435 | 702 | |
duke@435 | 703 | HeapWord* result = chunk_ptr->destination(); |
duke@435 | 704 | |
duke@435 | 705 | // If all the data in the chunk is live, then the new location of the object |
duke@435 | 706 | // can be calculated from the destination of the chunk plus the offset of the |
duke@435 | 707 | // object in the chunk. |
duke@435 | 708 | if (chunk_ptr->data_size() == ChunkSize) { |
duke@435 | 709 | result += pointer_delta(addr, chunk_addr); |
duke@435 | 710 | return result; |
duke@435 | 711 | } |
duke@435 | 712 | |
duke@435 | 713 | // The new location of the object is |
duke@435 | 714 | // chunk destination + |
duke@435 | 715 | // size of the partial object extending onto the chunk + |
duke@435 | 716 | // sizes of the live objects in the Chunk that are to the left of addr |
duke@435 | 717 | const size_t partial_obj_size = chunk_ptr->partial_obj_size(); |
duke@435 | 718 | HeapWord* const search_start = chunk_addr + partial_obj_size; |
duke@435 | 719 | |
duke@435 | 720 | const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); |
duke@435 | 721 | size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); |
duke@435 | 722 | |
duke@435 | 723 | result += partial_obj_size + live_to_left; |
duke@435 | 724 | assert(result <= addr, "object cannot move to the right"); |
duke@435 | 725 | return result; |
duke@435 | 726 | } |
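(Worked example, editorial addition with made-up values: if the chunk's
destination() is word offset 10000, a partial object spills 100 words onto
the chunk, and the bitmap counts 40 live words between search_start and
addr, the new location is 10000 + 100 + 40 = 10140 -- always at or below
the old address, as the assert checks.)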
duke@435 | 727 | |
duke@435 | 728 | HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) { |
duke@435 | 729 | assert(addr != NULL, "Should detect NULL oop earlier"); |
duke@435 | 730 | assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap"); |
duke@435 | 731 | #ifdef ASSERT |
duke@435 | 732 | if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) { |
duke@435 | 733 | gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr); |
duke@435 | 734 | } |
duke@435 | 735 | #endif |
duke@435 | 736 | assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked"); |
duke@435 | 737 | |
duke@435 | 738 | // Chunk covering the object. |
duke@435 | 739 | size_t chunk_index = addr_to_chunk_idx(addr); |
duke@435 | 740 | const ChunkData* const chunk_ptr = chunk(chunk_index); |
duke@435 | 741 | HeapWord* const chunk_addr = chunk_align_down(addr); |
duke@435 | 742 | |
duke@435 | 743 | assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object"); |
duke@435 | 744 | assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check"); |
duke@435 | 745 | |
duke@435 | 746 | HeapWord* result = chunk_ptr->destination(); |
duke@435 | 747 | |
duke@435 | 748 | // If all the data in the chunk is live, then the new location of the object |
duke@435 | 749 | // can be calculated from the destination of the chunk plus the offset of the |
duke@435 | 750 | // object in the chunk. |
duke@435 | 751 | if (chunk_ptr->data_size() == ChunkSize) { |
duke@435 | 752 | result += pointer_delta(addr, chunk_addr); |
duke@435 | 753 | return result; |
duke@435 | 754 | } |
duke@435 | 755 | |
duke@435 | 756 | // The new location of the object is |
duke@435 | 757 | // chunk destination + |
duke@435 | 758 | // block offset + |
duke@435 | 759 | // sizes of the live objects in the Block that are to the left of addr |
duke@435 | 760 | const size_t block_offset = addr_to_block_ptr(addr)->offset(); |
duke@435 | 761 | HeapWord* const search_start = chunk_addr + block_offset; |
duke@435 | 762 | |
duke@435 | 763 | const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap(); |
duke@435 | 764 | size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr)); |
duke@435 | 765 | |
duke@435 | 766 | result += block_offset + live_to_left; |
duke@435 | 767 | assert(result <= addr, "object cannot move to the right"); |
duke@435 | 768 | assert(result == chunk_calc_new_pointer(addr), "Should match"); |
duke@435 | 769 | return result; |
duke@435 | 770 | } |
duke@435 | 771 | |
duke@435 | 772 | klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) { |
duke@435 | 773 | klassOop updated_klass; |
duke@435 | 774 | if (PSParallelCompact::should_update_klass(old_klass)) { |
duke@435 | 775 | updated_klass = (klassOop) calc_new_pointer(old_klass); |
duke@435 | 776 | } else { |
duke@435 | 777 | updated_klass = old_klass; |
duke@435 | 778 | } |
duke@435 | 779 | |
duke@435 | 780 | return updated_klass; |
duke@435 | 781 | } |
duke@435 | 782 | |
duke@435 | 783 | #ifdef ASSERT |
duke@435 | 784 | void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace) |
duke@435 | 785 | { |
duke@435 | 786 | const size_t* const beg = (const size_t*)vspace->committed_low_addr(); |
duke@435 | 787 | const size_t* const end = (const size_t*)vspace->committed_high_addr(); |
duke@435 | 788 | for (const size_t* p = beg; p < end; ++p) { |
duke@435 | 789 | assert(*p == 0, "not zero"); |
duke@435 | 790 | } |
duke@435 | 791 | } |
duke@435 | 792 | |
duke@435 | 793 | void ParallelCompactData::verify_clear() |
duke@435 | 794 | { |
duke@435 | 795 | verify_clear(_chunk_vspace); |
duke@435 | 796 | verify_clear(_block_vspace); |
duke@435 | 797 | } |
duke@435 | 798 | #endif // #ifdef ASSERT |
duke@435 | 799 | |
duke@435 | 800 | #ifdef NOT_PRODUCT |
duke@435 | 801 | ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) { |
duke@435 | 802 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 803 | return sd.chunk(chunk_index); |
duke@435 | 804 | } |
duke@435 | 805 | #endif |
duke@435 | 806 | |
duke@435 | 807 | elapsedTimer PSParallelCompact::_accumulated_time; |
duke@435 | 808 | unsigned int PSParallelCompact::_total_invocations = 0; |
duke@435 | 809 | unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0; |
duke@435 | 810 | jlong PSParallelCompact::_time_of_last_gc = 0; |
duke@435 | 811 | CollectorCounters* PSParallelCompact::_counters = NULL; |
duke@435 | 812 | ParMarkBitMap PSParallelCompact::_mark_bitmap; |
duke@435 | 813 | ParallelCompactData PSParallelCompact::_summary_data; |
duke@435 | 814 | |
duke@435 | 815 | PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure; |
coleenp@548 | 816 | |
coleenp@548 | 817 | void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); } |
coleenp@548 | 818 | bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); } |
coleenp@548 | 819 | |
coleenp@548 | 820 | void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 821 | void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 822 | |
duke@435 | 823 | PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); |
duke@435 | 824 | PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); |
duke@435 | 825 | |
coleenp@548 | 826 | void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } |
coleenp@548 | 827 | void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } |
coleenp@548 | 828 | |
coleenp@548 | 829 | void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); } |
coleenp@548 | 830 | |
coleenp@548 | 831 | void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); } |
coleenp@548 | 832 | void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); } |
duke@435 | 833 | |
duke@435 | 834 | void PSParallelCompact::post_initialize() { |
duke@435 | 835 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 836 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 837 | |
duke@435 | 838 | MemRegion mr = heap->reserved_region(); |
duke@435 | 839 | _ref_processor = ReferenceProcessor::create_ref_processor( |
duke@435 | 840 | mr, // span |
duke@435 | 841 | true, // atomic_discovery |
duke@435 | 842 | true, // mt_discovery |
duke@435 | 843 | &_is_alive_closure, |
duke@435 | 844 | ParallelGCThreads, |
duke@435 | 845 | ParallelRefProcEnabled); |
duke@435 | 846 | _counters = new CollectorCounters("PSParallelCompact", 1); |
duke@435 | 847 | |
duke@435 | 848 | // Initialize static fields in ParCompactionManager. |
duke@435 | 849 | ParCompactionManager::initialize(mark_bitmap()); |
duke@435 | 850 | } |
duke@435 | 851 | |
duke@435 | 852 | bool PSParallelCompact::initialize() { |
duke@435 | 853 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 854 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 855 | MemRegion mr = heap->reserved_region(); |
duke@435 | 856 | |
duke@435 | 857 | // Was the old gen allocated successfully? |
duke@435 | 858 | if (!heap->old_gen()->is_allocated()) { |
duke@435 | 859 | return false; |
duke@435 | 860 | } |
duke@435 | 861 | |
duke@435 | 862 | initialize_space_info(); |
duke@435 | 863 | initialize_dead_wood_limiter(); |
duke@435 | 864 | |
duke@435 | 865 | if (!_mark_bitmap.initialize(mr)) { |
duke@435 | 866 | vm_shutdown_during_initialization("Unable to allocate bit map for " |
duke@435 | 867 | "parallel garbage collection for the requested heap size."); |
duke@435 | 868 | return false; |
duke@435 | 869 | } |
duke@435 | 870 | |
duke@435 | 871 | if (!_summary_data.initialize(mr)) { |
duke@435 | 872 | vm_shutdown_during_initialization("Unable to allocate tables for " |
duke@435 | 873 | "parallel garbage collection for the requested heap size."); |
duke@435 | 874 | return false; |
duke@435 | 875 | } |
duke@435 | 876 | |
duke@435 | 877 | return true; |
duke@435 | 878 | } |
duke@435 | 879 | |
duke@435 | 880 | void PSParallelCompact::initialize_space_info() |
duke@435 | 881 | { |
duke@435 | 882 | memset(&_space_info, 0, sizeof(_space_info)); |
duke@435 | 883 | |
duke@435 | 884 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 885 | PSYoungGen* young_gen = heap->young_gen(); |
duke@435 | 886 | MutableSpace* perm_space = heap->perm_gen()->object_space(); |
duke@435 | 887 | |
duke@435 | 888 | _space_info[perm_space_id].set_space(perm_space); |
duke@435 | 889 | _space_info[old_space_id].set_space(heap->old_gen()->object_space()); |
duke@435 | 890 | _space_info[eden_space_id].set_space(young_gen->eden_space()); |
duke@435 | 891 | _space_info[from_space_id].set_space(young_gen->from_space()); |
duke@435 | 892 | _space_info[to_space_id].set_space(young_gen->to_space()); |
duke@435 | 893 | |
duke@435 | 894 | _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array()); |
duke@435 | 895 | _space_info[old_space_id].set_start_array(heap->old_gen()->start_array()); |
duke@435 | 896 | |
duke@435 | 897 | _space_info[perm_space_id].set_min_dense_prefix(perm_space->top()); |
duke@435 | 898 | if (TraceParallelOldGCDensePrefix) { |
duke@435 | 899 | tty->print_cr("perm min_dense_prefix=" PTR_FORMAT, |
duke@435 | 900 | _space_info[perm_space_id].min_dense_prefix()); |
duke@435 | 901 | } |
duke@435 | 902 | } |
duke@435 | 903 | |
duke@435 | 904 | void PSParallelCompact::initialize_dead_wood_limiter() |
duke@435 | 905 | { |
duke@435 | 906 | const size_t max = 100; |
duke@435 | 907 | _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0; |
duke@435 | 908 | _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0; |
duke@435 | 909 | _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev); |
duke@435 | 910 | DEBUG_ONLY(_dwl_initialized = true;) |
duke@435 | 911 | _dwl_adjustment = normal_distribution(1.0); |
duke@435 | 912 | } |
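(Illustration, editorial addition: _dwl_first_term is the peak of a
Gaussian pdf, 1/(sqrt(2*pi)*sigma), and _dwl_adjustment is presumably that
pdf evaluated at density 1.0. A standalone sketch with hypothetical mean
0.50 and std dev 0.80.)

    #include <cmath>
    #include <cstdio>

    int main() {
      const double mean = 0.50, std_dev = 0.80;   // hypothetical inputs
      const double first_term = 1.0 / (sqrt(2.0 * M_PI) * std_dev);
      const double z = (1.0 - mean) / std_dev;    // pdf argument: density 1.0
      const double adjustment = first_term * exp(-0.5 * z * z);
      printf("first_term = %f adjustment = %f\n", first_term, adjustment);
      return 0;
    }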
duke@435 | 913 | |
duke@435 | 914 | // Simple class for storing info about the heap at the start of GC, to be used |
duke@435 | 915 | // after GC for comparison/printing. |
duke@435 | 916 | class PreGCValues { |
duke@435 | 917 | public: |
duke@435 | 918 | PreGCValues() { } |
duke@435 | 919 | PreGCValues(ParallelScavengeHeap* heap) { fill(heap); } |
duke@435 | 920 | |
duke@435 | 921 | void fill(ParallelScavengeHeap* heap) { |
duke@435 | 922 | _heap_used = heap->used(); |
duke@435 | 923 | _young_gen_used = heap->young_gen()->used_in_bytes(); |
duke@435 | 924 | _old_gen_used = heap->old_gen()->used_in_bytes(); |
duke@435 | 925 | _perm_gen_used = heap->perm_gen()->used_in_bytes(); |
duke@435 | 926 | }; |
duke@435 | 927 | |
duke@435 | 928 | size_t heap_used() const { return _heap_used; } |
duke@435 | 929 | size_t young_gen_used() const { return _young_gen_used; } |
duke@435 | 930 | size_t old_gen_used() const { return _old_gen_used; } |
duke@435 | 931 | size_t perm_gen_used() const { return _perm_gen_used; } |
duke@435 | 932 | |
duke@435 | 933 | private: |
duke@435 | 934 | size_t _heap_used; |
duke@435 | 935 | size_t _young_gen_used; |
duke@435 | 936 | size_t _old_gen_used; |
duke@435 | 937 | size_t _perm_gen_used; |
duke@435 | 938 | }; |
duke@435 | 939 | |
duke@435 | 940 | void |
duke@435 | 941 | PSParallelCompact::clear_data_covering_space(SpaceId id) |
duke@435 | 942 | { |
duke@435 | 943 | // At this point, top is the value before GC, new_top() is the value that will |
duke@435 | 944 | // be set at the end of GC. The marking bitmap is cleared to top; nothing |
duke@435 | 945 | // should be marked above top. The summary data is cleared to the larger of |
duke@435 | 946 | // top & new_top. |
duke@435 | 947 | MutableSpace* const space = _space_info[id].space(); |
duke@435 | 948 | HeapWord* const bot = space->bottom(); |
duke@435 | 949 | HeapWord* const top = space->top(); |
duke@435 | 950 | HeapWord* const max_top = MAX2(top, _space_info[id].new_top()); |
duke@435 | 951 | |
duke@435 | 952 | const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot); |
duke@435 | 953 | const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top)); |
duke@435 | 954 | _mark_bitmap.clear_range(beg_bit, end_bit); |
duke@435 | 955 | |
duke@435 | 956 | const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot); |
duke@435 | 957 | const size_t end_chunk = |
duke@435 | 958 | _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top)); |
duke@435 | 959 | _summary_data.clear_range(beg_chunk, end_chunk); |
duke@435 | 960 | } |
duke@435 | 961 | |
duke@435 | 962 | void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values) |
duke@435 | 963 | { |
duke@435 | 964 | // Update the from & to space pointers in space_info, since they are swapped |
duke@435 | 965 | // at each young gen gc. Do the update unconditionally (even though a |
duke@435 | 966 | // promotion failure does not swap spaces) because an unknown number of minor |
duke@435 | 967 | // collections will have swapped the spaces an unknown number of times. |
duke@435 | 968 | TraceTime tm("pre compact", print_phases(), true, gclog_or_tty); |
duke@435 | 969 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 970 | _space_info[from_space_id].set_space(heap->young_gen()->from_space()); |
duke@435 | 971 | _space_info[to_space_id].set_space(heap->young_gen()->to_space()); |
duke@435 | 972 | |
duke@435 | 973 | pre_gc_values->fill(heap); |
duke@435 | 974 | |
duke@435 | 975 | ParCompactionManager::reset(); |
duke@435 | 976 | NOT_PRODUCT(_mark_bitmap.reset_counters()); |
duke@435 | 977 | DEBUG_ONLY(add_obj_count = add_obj_size = 0;) |
duke@435 | 978 | DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;) |
duke@435 | 979 | |
duke@435 | 980 | // Increment the invocation count |
apetrusenko@574 | 981 | heap->increment_total_collections(true); |
duke@435 | 982 | |
duke@435 | 983 | // We need to track unique mark sweep invocations as well. |
duke@435 | 984 | _total_invocations++; |
duke@435 | 985 | |
duke@435 | 986 | if (PrintHeapAtGC) { |
duke@435 | 987 | Universe::print_heap_before_gc(); |
duke@435 | 988 | } |
duke@435 | 989 | |
duke@435 | 990 | // Fill in TLABs |
duke@435 | 991 | heap->accumulate_statistics_all_tlabs(); |
duke@435 | 992 | heap->ensure_parsability(true); // retire TLABs |
duke@435 | 993 | |
duke@435 | 994 | if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { |
duke@435 | 995 | HandleMark hm; // Discard invalid handles created during verification |
duke@435 | 996 | gclog_or_tty->print(" VerifyBeforeGC:"); |
duke@435 | 997 | Universe::verify(true); |
duke@435 | 998 | } |
duke@435 | 999 | |
duke@435 | 1000 | // Verify object start arrays |
duke@435 | 1001 | if (VerifyObjectStartArray && |
duke@435 | 1002 | VerifyBeforeGC) { |
duke@435 | 1003 | heap->old_gen()->verify_object_start_array(); |
duke@435 | 1004 | heap->perm_gen()->verify_object_start_array(); |
duke@435 | 1005 | } |
duke@435 | 1006 | |
duke@435 | 1007 | DEBUG_ONLY(mark_bitmap()->verify_clear();) |
duke@435 | 1008 | DEBUG_ONLY(summary_data().verify_clear();) |
jcoomes@645 | 1009 | |
jcoomes@645 | 1010 | // Have worker threads release resources the next time they run a task. |
jcoomes@645 | 1011 | gc_task_manager()->release_all_resources(); |
duke@435 | 1012 | } |
duke@435 | 1013 | |
duke@435 | 1014 | void PSParallelCompact::post_compact() |
duke@435 | 1015 | { |
duke@435 | 1016 | TraceTime tm("post compact", print_phases(), true, gclog_or_tty); |
duke@435 | 1017 | |
duke@435 | 1018 | // Clear the marking bitmap and summary data and update top() in each space. |
duke@435 | 1019 | for (unsigned int id = perm_space_id; id < last_space_id; ++id) { |
duke@435 | 1020 | clear_data_covering_space(SpaceId(id)); |
duke@435 | 1021 | _space_info[id].space()->set_top(_space_info[id].new_top()); |
duke@435 | 1022 | } |
duke@435 | 1023 | |
duke@435 | 1024 | MutableSpace* const eden_space = _space_info[eden_space_id].space(); |
duke@435 | 1025 | MutableSpace* const from_space = _space_info[from_space_id].space(); |
duke@435 | 1026 | MutableSpace* const to_space = _space_info[to_space_id].space(); |
duke@435 | 1027 | |
duke@435 | 1028 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 1029 | bool eden_empty = eden_space->is_empty(); |
duke@435 | 1030 | if (!eden_empty) { |
duke@435 | 1031 | eden_empty = absorb_live_data_from_eden(heap->size_policy(), |
duke@435 | 1032 | heap->young_gen(), heap->old_gen()); |
duke@435 | 1033 | } |
duke@435 | 1034 | |
duke@435 | 1035 | // Update heap occupancy information which is used as input to the soft ref |
duke@435 | 1036 | // clearing policy at the next gc. |
duke@435 | 1037 | Universe::update_heap_info_at_gc(); |
duke@435 | 1038 | |
duke@435 | 1039 | bool young_gen_empty = eden_empty && from_space->is_empty() && |
duke@435 | 1040 | to_space->is_empty(); |
duke@435 | 1041 | |
duke@435 | 1042 | BarrierSet* bs = heap->barrier_set(); |
duke@435 | 1043 | if (bs->is_a(BarrierSet::ModRef)) { |
duke@435 | 1044 | ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs; |
duke@435 | 1045 | MemRegion old_mr = heap->old_gen()->reserved(); |
duke@435 | 1046 | MemRegion perm_mr = heap->perm_gen()->reserved(); |
duke@435 | 1047 | assert(perm_mr.end() <= old_mr.start(), "Generations out of order"); |
duke@435 | 1048 | |
duke@435 | 1049 | if (young_gen_empty) { |
duke@435 | 1050 | modBS->clear(MemRegion(perm_mr.start(), old_mr.end())); |
duke@435 | 1051 | } else { |
duke@435 | 1052 | modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end())); |
duke@435 | 1053 | } |
duke@435 | 1054 | } |
duke@435 | 1055 | |
duke@435 | 1056 | Threads::gc_epilogue(); |
duke@435 | 1057 | CodeCache::gc_epilogue(); |
duke@435 | 1058 | |
duke@435 | 1059 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
duke@435 | 1060 | |
duke@435 | 1061 | ref_processor()->enqueue_discovered_references(NULL); |
duke@435 | 1062 | |
duke@435 | 1063 | // Update time of last GC |
duke@435 | 1064 | reset_millis_since_last_gc(); |
duke@435 | 1065 | } |
duke@435 | 1066 | |
duke@435 | 1067 | HeapWord* |
duke@435 | 1068 | PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id, |
duke@435 | 1069 | bool maximum_compaction) |
duke@435 | 1070 | { |
duke@435 | 1071 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 1072 | const ParallelCompactData& sd = summary_data(); |
duke@435 | 1073 | |
duke@435 | 1074 | const MutableSpace* const space = _space_info[id].space(); |
duke@435 | 1075 | HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); |
duke@435 | 1076 | const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom()); |
duke@435 | 1077 | const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up); |
duke@435 | 1078 | |
duke@435 | 1079 | // Skip full chunks at the beginning of the space--they are necessarily part |
duke@435 | 1080 | // of the dense prefix. |
duke@435 | 1081 | size_t full_count = 0; |
duke@435 | 1082 | const ChunkData* cp; |
duke@435 | 1083 | for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) { |
duke@435 | 1084 | ++full_count; |
duke@435 | 1085 | } |
duke@435 | 1086 | |
duke@435 | 1087 | assert(total_invocations() >= _maximum_compaction_gc_num, "sanity"); |
duke@435 | 1088 | const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num; |
duke@435 | 1089 | const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval; |
duke@435 | 1090 | if (maximum_compaction || cp == end_cp || interval_ended) { |
duke@435 | 1091 | _maximum_compaction_gc_num = total_invocations(); |
duke@435 | 1092 | return sd.chunk_to_addr(cp); |
duke@435 | 1093 | } |
duke@435 | 1094 | |
duke@435 | 1095 | HeapWord* const new_top = _space_info[id].new_top(); |
duke@435 | 1096 | const size_t space_live = pointer_delta(new_top, space->bottom()); |
duke@435 | 1097 | const size_t space_used = space->used_in_words(); |
duke@435 | 1098 | const size_t space_capacity = space->capacity_in_words(); |
duke@435 | 1099 | |
duke@435 | 1100 | const double cur_density = double(space_live) / space_capacity; |
duke@435 | 1101 | const double deadwood_density = |
duke@435 | 1102 | (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density; |
duke@435 | 1103 | const size_t deadwood_goal = size_t(space_capacity * deadwood_density); |
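// Worked example (illustrative values only): at half density,
// cur_density == 0.5 and deadwood_density == (1 - 0.5)^2 * 0.5^2 == 0.0625,
// the maximum of this quadratic; nearly empty or nearly full spaces give
// values near 0, so little dead wood is tolerated at the density extremes.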
duke@435 | 1104 | |
duke@435 | 1105 | if (TraceParallelOldGCDensePrefix) { |
duke@435 | 1106 | tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT, |
duke@435 | 1107 | cur_density, deadwood_density, deadwood_goal); |
duke@435 | 1108 | tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " " |
duke@435 | 1109 | "space_cap=" SIZE_FORMAT, |
duke@435 | 1110 | space_live, space_used, |
duke@435 | 1111 | space_capacity); |
duke@435 | 1112 | } |
duke@435 | 1113 | |
duke@435 | 1114 | // XXX - Use binary search? |
duke@435 | 1115 | HeapWord* dense_prefix = sd.chunk_to_addr(cp); |
duke@435 | 1116 | const ChunkData* full_cp = cp; |
duke@435 | 1117 | const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1); |
duke@435 | 1118 | while (cp < end_cp) { |
duke@435 | 1119 | HeapWord* chunk_destination = cp->destination(); |
duke@435 | 1120 | const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination); |
duke@435 | 1121 | if (TraceParallelOldGCDensePrefix && Verbose) { |
duke@435 | 1122 | tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " " |
duke@435 | 1123 | "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"), |
duke@435 | 1124 | sd.chunk(cp), chunk_destination, |
duke@435 | 1125 | dense_prefix, cur_deadwood); |
duke@435 | 1126 | } |
duke@435 | 1127 | |
duke@435 | 1128 | if (cur_deadwood >= deadwood_goal) { |
duke@435 | 1129 | // Found the chunk that has the correct amount of deadwood to the left. |
duke@435 | 1130 | // This typically occurs after crossing a fairly sparse set of chunks, so |
duke@435 | 1131 | // iterate backwards over those sparse chunks, looking for the chunk that |
duke@435 | 1132 | // has the lowest density of live objects 'to the right.' |
duke@435 | 1133 | size_t space_to_left = sd.chunk(cp) * chunk_size; |
duke@435 | 1134 | size_t live_to_left = space_to_left - cur_deadwood; |
duke@435 | 1135 | size_t space_to_right = space_capacity - space_to_left; |
duke@435 | 1136 | size_t live_to_right = space_live - live_to_left; |
duke@435 | 1137 | double density_to_right = double(live_to_right) / space_to_right; |
duke@435 | 1138 | while (cp > full_cp) { |
duke@435 | 1139 | --cp; |
duke@435 | 1140 | const size_t prev_chunk_live_to_right = live_to_right - cp->data_size(); |
duke@435 | 1141 | const size_t prev_chunk_space_to_right = space_to_right + chunk_size; |
duke@435 | 1142 | double prev_chunk_density_to_right = |
duke@435 | 1143 | double(prev_chunk_live_to_right) / prev_chunk_space_to_right; |
duke@435 | 1144 | if (density_to_right <= prev_chunk_density_to_right) { |
duke@435 | 1145 | return dense_prefix; |
duke@435 | 1146 | } |
duke@435 | 1147 | if (TraceParallelOldGCDensePrefix && Verbose) { |
duke@435 | 1148 | tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f " |
duke@435 | 1149 | "pc_d2r=%10.8f", sd.chunk(cp), density_to_right, |
duke@435 | 1150 | prev_chunk_density_to_right); |
duke@435 | 1151 | } |
duke@435 | 1152 | dense_prefix -= chunk_size; |
duke@435 | 1153 | live_to_right = prev_chunk_live_to_right; |
duke@435 | 1154 | space_to_right = prev_chunk_space_to_right; |
duke@435 | 1155 | density_to_right = prev_chunk_density_to_right; |
duke@435 | 1156 | } |
duke@435 | 1157 | return dense_prefix; |
duke@435 | 1158 | } |
duke@435 | 1159 | |
duke@435 | 1160 | dense_prefix += chunk_size; |
duke@435 | 1161 | ++cp; |
duke@435 | 1162 | } |
duke@435 | 1163 | |
duke@435 | 1164 | return dense_prefix; |
duke@435 | 1165 | } |
duke@435 | 1166 | |
duke@435 | 1167 | #ifndef PRODUCT |
duke@435 | 1168 | void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm, |
duke@435 | 1169 | const SpaceId id, |
duke@435 | 1170 | const bool maximum_compaction, |
duke@435 | 1171 | HeapWord* const addr) |
duke@435 | 1172 | { |
duke@435 | 1173 | const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr); |
duke@435 | 1174 | ChunkData* const cp = summary_data().chunk(chunk_idx); |
duke@435 | 1175 | const MutableSpace* const space = _space_info[id].space(); |
duke@435 | 1176 | HeapWord* const new_top = _space_info[id].new_top(); |
duke@435 | 1177 | |
duke@435 | 1178 | const size_t space_live = pointer_delta(new_top, space->bottom()); |
duke@435 | 1179 | const size_t dead_to_left = pointer_delta(addr, cp->destination()); |
duke@435 | 1180 | const size_t space_cap = space->capacity_in_words(); |
duke@435 | 1181 | const double dead_to_left_pct = double(dead_to_left) / space_cap; |
duke@435 | 1182 | const size_t live_to_right = new_top - cp->destination(); |
duke@435 | 1183 | const size_t dead_to_right = space->top() - addr - live_to_right; |
duke@435 | 1184 | |
duke@435 | 1185 | tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " " |
duke@435 | 1186 | "spl=" SIZE_FORMAT " " |
duke@435 | 1187 | "d2l=" SIZE_FORMAT " d2l%%=%6.4f " |
duke@435 | 1188 | "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT |
duke@435 | 1189 | " ratio=%10.8f", |
duke@435 | 1190 | algorithm, addr, chunk_idx, |
duke@435 | 1191 | space_live, |
duke@435 | 1192 | dead_to_left, dead_to_left_pct, |
duke@435 | 1193 | dead_to_right, live_to_right, |
duke@435 | 1194 | double(dead_to_right) / live_to_right); |
duke@435 | 1195 | } |
duke@435 | 1196 | #endif // #ifndef PRODUCT |
duke@435 | 1197 | |
duke@435 | 1198 | // Return a fraction indicating how much of the generation can be treated as |
duke@435 | 1199 | // "dead wood" (i.e., not reclaimed). The function uses a normal distribution |
duke@435 | 1200 | // based on the density of live objects in the generation to determine a limit, |
duke@435 | 1201 | // which is then adjusted so the return value is min_percent when the density is |
duke@435 | 1202 | // 1. |
duke@435 | 1203 | // |
duke@435 | 1204 | // The following table shows some return values for different values of the
duke@435 | 1205 | // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and |
duke@435 | 1206 | // min_percent is 1. |
duke@435 | 1207 | // |
duke@435 | 1208 | // fraction allowed as dead wood |
duke@435 | 1209 | // ----------------------------------------------------------------- |
duke@435 | 1210 | // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95 |
duke@435 | 1211 | // ------- ---------- ---------- ---------- ---------- ---------- ---------- |
duke@435 | 1212 | // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 |
duke@435 | 1213 | // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941 |
duke@435 | 1214 | // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272 |
duke@435 | 1215 | // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066 |
duke@435 | 1216 | // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975 |
duke@435 | 1217 | // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313 |
duke@435 | 1218 | // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132 |
duke@435 | 1219 | // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289 |
duke@435 | 1220 | // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500 |
duke@435 | 1221 | // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386 |
duke@435 | 1222 | // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510 |
duke@435 | 1223 | // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386 |
duke@435 | 1224 | // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500 |
duke@435 | 1225 | // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289 |
duke@435 | 1226 | // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132 |
duke@435 | 1227 | // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313 |
duke@435 | 1228 | // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975 |
duke@435 | 1229 | // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066 |
duke@435 | 1230 | // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272 |
duke@435 | 1231 | // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941 |
duke@435 | 1232 | // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 |
duke@435 | 1233 | |
duke@435 | 1234 | double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent) |
duke@435 | 1235 | { |
duke@435 | 1236 | assert(_dwl_initialized, "uninitialized"); |
duke@435 | 1237 | |
duke@435 | 1238 | // The raw limit is the value of the normal distribution at x = density. |
duke@435 | 1239 | const double raw_limit = normal_distribution(density); |
duke@435 | 1240 | |
duke@435 | 1241 | // Adjust the raw limit so it becomes the minimum when the density is 1. |
duke@435 | 1242 | // |
duke@435 | 1243 | // First subtract the adjustment value (which is simply the precomputed value |
duke@435 | 1244 | // normal_distribution(1.0)); this yields a value of 0 when the density is 1. |
duke@435 | 1245 | // Then add the minimum value, so the minimum is returned when the density is |
duke@435 | 1246 | // 1. Finally, prevent negative values, which occur when the mean is not 0.5. |
duke@435 | 1247 | const double min = double(min_percent) / 100.0; |
duke@435 | 1248 | const double limit = raw_limit - _dwl_adjustment + min; |
duke@435 | 1249 | return MAX2(limit, 0.0); |
duke@435 | 1250 | } |
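// A minimal standalone sketch (disabled, like the other #if 0 blocks in this
// file) of the computation behind the table above. The helper name dwl_sketch
// and its mean/std_dev parameters are illustrative stand-ins for the
// precomputed _dwl_* values and the ParallelOldDeadWoodLimiterStdDev flag;
// the density function mirrors what normal_distribution() is described as
// computing in the comments above.
#if 0
static double dwl_sketch(double density, double mean, double std_dev,
                         double min_percent) {
  const double root_two_pi = 2.5066282746310002;   // sqrt(2 * pi)
  // Normal density at x == density.
  const double z = (density - mean) / std_dev;
  const double raw = exp(-0.5 * z * z) / (std_dev * root_two_pi);
  // The adjustment is the density at x == 1.0, so the curve passes through
  // min_percent/100 when the space is completely full.
  const double z1 = (1.0 - mean) / std_dev;
  const double adj = exp(-0.5 * z1 * z1) / (std_dev * root_two_pi);
  const double limit = raw - adj + min_percent / 100.0;
  return limit > 0.0 ? limit : 0.0;
}
// dwl_sketch(0.50, 0.5, 0.95, 1) ~= 0.0643151, matching the std_dev=95
// column at density 0.50 in the table above.
#endif // #if 0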
duke@435 | 1251 | |
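// Find, via binary search, the first chunk in [beg, end) that is not
// completely full, i.e., the first chunk that can introduce dead space;
// chunks to the left of the result compact onto themselves.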
duke@435 | 1252 | ParallelCompactData::ChunkData* |
duke@435 | 1253 | PSParallelCompact::first_dead_space_chunk(const ChunkData* beg, |
duke@435 | 1254 | const ChunkData* end) |
duke@435 | 1255 | { |
duke@435 | 1256 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 1257 | ParallelCompactData& sd = summary_data(); |
duke@435 | 1258 | size_t left = sd.chunk(beg); |
duke@435 | 1259 | size_t right = end > beg ? sd.chunk(end) - 1 : left; |
duke@435 | 1260 | |
duke@435 | 1261 | // Binary search. |
duke@435 | 1262 | while (left < right) { |
duke@435 | 1263 | // Equivalent to (left + right) / 2, but does not overflow. |
duke@435 | 1264 | const size_t middle = left + (right - left) / 2; |
duke@435 | 1265 | ChunkData* const middle_ptr = sd.chunk(middle); |
duke@435 | 1266 | HeapWord* const dest = middle_ptr->destination(); |
duke@435 | 1267 | HeapWord* const addr = sd.chunk_to_addr(middle); |
duke@435 | 1268 | assert(dest != NULL, "sanity"); |
duke@435 | 1269 | assert(dest <= addr, "must move left"); |
duke@435 | 1270 | |
duke@435 | 1271 | if (middle > left && dest < addr) { |
duke@435 | 1272 | right = middle - 1; |
duke@435 | 1273 | } else if (middle < right && middle_ptr->data_size() == chunk_size) { |
duke@435 | 1274 | left = middle + 1; |
duke@435 | 1275 | } else { |
duke@435 | 1276 | return middle_ptr; |
duke@435 | 1277 | } |
duke@435 | 1278 | } |
duke@435 | 1279 | return sd.chunk(left); |
duke@435 | 1280 | } |
duke@435 | 1281 | |
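// Find, via binary search, the chunk in [beg, end) whose amount of dead
// space to the left is closest to dead_words; this locates the "limit"
// chunk used when sizing the dense prefix.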
duke@435 | 1282 | ParallelCompactData::ChunkData* |
duke@435 | 1283 | PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg, |
duke@435 | 1284 | const ChunkData* end, |
duke@435 | 1285 | size_t dead_words) |
duke@435 | 1286 | { |
duke@435 | 1287 | ParallelCompactData& sd = summary_data(); |
duke@435 | 1288 | size_t left = sd.chunk(beg); |
duke@435 | 1289 | size_t right = end > beg ? sd.chunk(end) - 1 : left; |
duke@435 | 1290 | |
duke@435 | 1291 | // Binary search. |
duke@435 | 1292 | while (left < right) { |
duke@435 | 1293 | // Equivalent to (left + right) / 2, but does not overflow. |
duke@435 | 1294 | const size_t middle = left + (right - left) / 2; |
duke@435 | 1295 | ChunkData* const middle_ptr = sd.chunk(middle); |
duke@435 | 1296 | HeapWord* const dest = middle_ptr->destination(); |
duke@435 | 1297 | HeapWord* const addr = sd.chunk_to_addr(middle); |
duke@435 | 1298 | assert(dest != NULL, "sanity"); |
duke@435 | 1299 | assert(dest <= addr, "must move left"); |
duke@435 | 1300 | |
duke@435 | 1301 | const size_t dead_to_left = pointer_delta(addr, dest); |
duke@435 | 1302 | if (middle > left && dead_to_left > dead_words) { |
duke@435 | 1303 | right = middle - 1; |
duke@435 | 1304 | } else if (middle < right && dead_to_left < dead_words) { |
duke@435 | 1305 | left = middle + 1; |
duke@435 | 1306 | } else { |
duke@435 | 1307 | return middle_ptr; |
duke@435 | 1308 | } |
duke@435 | 1309 | } |
duke@435 | 1310 | return sd.chunk(left); |
duke@435 | 1311 | } |
duke@435 | 1312 | |
duke@435 | 1313 | // The result is valid during the summary phase, after the initial summarization |
duke@435 | 1314 | // of each space into itself, and before final summarization. |
duke@435 | 1315 | inline double |
duke@435 | 1316 | PSParallelCompact::reclaimed_ratio(const ChunkData* const cp, |
duke@435 | 1317 | HeapWord* const bottom, |
duke@435 | 1318 | HeapWord* const top, |
duke@435 | 1319 | HeapWord* const new_top) |
duke@435 | 1320 | { |
duke@435 | 1321 | ParallelCompactData& sd = summary_data(); |
duke@435 | 1322 | |
duke@435 | 1323 | assert(cp != NULL, "sanity"); |
duke@435 | 1324 | assert(bottom != NULL, "sanity"); |
duke@435 | 1325 | assert(top != NULL, "sanity"); |
duke@435 | 1326 | assert(new_top != NULL, "sanity"); |
duke@435 | 1327 | assert(top >= new_top, "summary data problem?"); |
duke@435 | 1328 | assert(new_top > bottom, "space is empty; should not be here"); |
duke@435 | 1329 | assert(new_top >= cp->destination(), "sanity"); |
duke@435 | 1330 | assert(top >= sd.chunk_to_addr(cp), "sanity"); |
duke@435 | 1331 | |
duke@435 | 1332 | HeapWord* const destination = cp->destination(); |
duke@435 | 1333 | const size_t dense_prefix_live = pointer_delta(destination, bottom); |
duke@435 | 1334 | const size_t compacted_region_live = pointer_delta(new_top, destination); |
duke@435 | 1335 | const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp)); |
duke@435 | 1336 | const size_t reclaimable = compacted_region_used - compacted_region_live; |
duke@435 | 1337 | |
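// The 1.25 weight on compacted_region_live lowers the ratio for candidate
// prefixes that would require copying more live data, presumably because
// copying an object costs more than skipping over dead wood.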
duke@435 | 1338 | const double divisor = dense_prefix_live + 1.25 * compacted_region_live; |
duke@435 | 1339 | return double(reclaimable) / divisor; |
duke@435 | 1340 | } |
duke@435 | 1341 | |
duke@435 | 1342 | // Return the address of the end of the dense prefix, a.k.a. the start of the |
duke@435 | 1343 | // compacted region. The address is always on a chunk boundary. |
duke@435 | 1344 | // |
duke@435 | 1345 | // Completely full chunks at the left are skipped, since no compaction can occur |
duke@435 | 1346 | // in those chunks. Then the maximum amount of dead wood to allow is computed, |
duke@435 | 1347 | // based on the density (amount live / capacity) of the generation; the chunk |
duke@435 | 1348 | // with approximately that amount of dead space to the left is identified as the |
duke@435 | 1349 | // limit chunk. Chunks between the last completely full chunk and the limit |
duke@435 | 1350 | // chunk are scanned and the one that has the best (maximum) reclaimed_ratio() |
duke@435 | 1351 | // is selected. |
duke@435 | 1352 | HeapWord* |
duke@435 | 1353 | PSParallelCompact::compute_dense_prefix(const SpaceId id, |
duke@435 | 1354 | bool maximum_compaction) |
duke@435 | 1355 | { |
duke@435 | 1356 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 1357 | const ParallelCompactData& sd = summary_data(); |
duke@435 | 1358 | |
duke@435 | 1359 | const MutableSpace* const space = _space_info[id].space(); |
duke@435 | 1360 | HeapWord* const top = space->top(); |
duke@435 | 1361 | HeapWord* const top_aligned_up = sd.chunk_align_up(top); |
duke@435 | 1362 | HeapWord* const new_top = _space_info[id].new_top(); |
duke@435 | 1363 | HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top); |
duke@435 | 1364 | HeapWord* const bottom = space->bottom(); |
duke@435 | 1365 | const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom); |
duke@435 | 1366 | const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); |
duke@435 | 1367 | const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up); |
duke@435 | 1368 | |
duke@435 | 1369 | // Skip full chunks at the beginning of the space--they are necessarily part |
duke@435 | 1370 | // of the dense prefix. |
duke@435 | 1371 | const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp); |
duke@435 | 1372 | assert(full_cp->destination() == sd.chunk_to_addr(full_cp) || |
duke@435 | 1373 | space->is_empty(), "no dead space allowed to the left"); |
duke@435 | 1374 | assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1, |
duke@435 | 1375 | "chunk must have dead space"); |
duke@435 | 1376 | |
duke@435 | 1377 | // The gc number is saved whenever a maximum compaction is done, and used to |
duke@435 | 1378 | // determine when the maximum compaction interval has expired. This avoids |
duke@435 | 1379 | // successive max compactions for different reasons. |
duke@435 | 1380 | assert(total_invocations() >= _maximum_compaction_gc_num, "sanity"); |
duke@435 | 1381 | const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num; |
duke@435 | 1382 | const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval || |
duke@435 | 1383 | total_invocations() == HeapFirstMaximumCompactionCount; |
duke@435 | 1384 | if (maximum_compaction || full_cp == top_cp || interval_ended) { |
duke@435 | 1385 | _maximum_compaction_gc_num = total_invocations(); |
duke@435 | 1386 | return sd.chunk_to_addr(full_cp); |
duke@435 | 1387 | } |
duke@435 | 1388 | |
duke@435 | 1389 | const size_t space_live = pointer_delta(new_top, bottom); |
duke@435 | 1390 | const size_t space_used = space->used_in_words(); |
duke@435 | 1391 | const size_t space_capacity = space->capacity_in_words(); |
duke@435 | 1392 | |
duke@435 | 1393 | const double density = double(space_live) / double(space_capacity); |
duke@435 | 1394 | const size_t min_percent_free = |
duke@435 | 1395 | id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio; |
duke@435 | 1396 | const double limiter = dead_wood_limiter(density, min_percent_free); |
duke@435 | 1397 | const size_t dead_wood_max = space_used - space_live; |
duke@435 | 1398 | const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter), |
duke@435 | 1399 | dead_wood_max); |
duke@435 | 1400 | |
duke@435 | 1401 | if (TraceParallelOldGCDensePrefix) { |
duke@435 | 1402 | tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " " |
duke@435 | 1403 | "space_cap=" SIZE_FORMAT, |
duke@435 | 1404 | space_live, space_used, |
duke@435 | 1405 | space_capacity); |
duke@435 | 1406 | tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f " |
duke@435 | 1407 | "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT, |
duke@435 | 1408 | density, min_percent_free, limiter, |
duke@435 | 1409 | dead_wood_max, dead_wood_limit); |
duke@435 | 1410 | } |
duke@435 | 1411 | |
duke@435 | 1412 | // Locate the chunk with the desired amount of dead space to the left. |
duke@435 | 1413 | const ChunkData* const limit_cp = |
duke@435 | 1414 | dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit); |
duke@435 | 1415 | |
duke@435 | 1416 | // Scan from the first chunk with dead space to the limit chunk and find the |
duke@435 | 1417 | // one with the best (largest) reclaimed ratio. |
duke@435 | 1418 | double best_ratio = 0.0; |
duke@435 | 1419 | const ChunkData* best_cp = full_cp; |
duke@435 | 1420 | for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) { |
duke@435 | 1421 | double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top); |
duke@435 | 1422 | if (tmp_ratio > best_ratio) { |
duke@435 | 1423 | best_cp = cp; |
duke@435 | 1424 | best_ratio = tmp_ratio; |
duke@435 | 1425 | } |
duke@435 | 1426 | } |
duke@435 | 1427 | |
duke@435 | 1428 | #if 0 |
duke@435 | 1429 | // Something to consider: if the chunk with the best ratio is 'close to' the |
duke@435 | 1430 | // first chunk w/free space, choose the first chunk with free space |
duke@435 | 1431 | // ("first-free"). The first-free chunk is usually near the start of the |
duke@435 | 1432 | // heap, which means we are copying most of the heap already, so copy a bit |
duke@435 | 1433 | // more to get complete compaction. |
duke@435 | 1434 | if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) { |
duke@435 | 1435 | _maximum_compaction_gc_num = total_invocations(); |
duke@435 | 1436 | best_cp = full_cp; |
duke@435 | 1437 | } |
duke@435 | 1438 | #endif // #if 0 |
duke@435 | 1439 | |
duke@435 | 1440 | return sd.chunk_to_addr(best_cp); |
duke@435 | 1441 | } |
duke@435 | 1442 | |
duke@435 | 1443 | void PSParallelCompact::summarize_spaces_quick() |
duke@435 | 1444 | { |
duke@435 | 1445 | for (unsigned int i = 0; i < last_space_id; ++i) { |
duke@435 | 1446 | const MutableSpace* space = _space_info[i].space(); |
duke@435 | 1447 | bool result = _summary_data.summarize(space->bottom(), space->end(), |
duke@435 | 1448 | space->bottom(), space->top(), |
duke@435 | 1449 | _space_info[i].new_top_addr()); |
duke@435 | 1450 | assert(result, "should never fail"); |
duke@435 | 1451 | _space_info[i].set_dense_prefix(space->bottom()); |
duke@435 | 1452 | } |
duke@435 | 1453 | } |
duke@435 | 1454 | |
duke@435 | 1455 | void PSParallelCompact::fill_dense_prefix_end(SpaceId id) |
duke@435 | 1456 | { |
duke@435 | 1457 | HeapWord* const dense_prefix_end = dense_prefix(id); |
duke@435 | 1458 | const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end); |
duke@435 | 1459 | const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end); |
duke@435 | 1460 | if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) { |
duke@435 | 1461 | // Only enough dead space is filled so that any remaining dead space to the |
duke@435 | 1462 | // left is larger than the minimum filler object. (The remainder is filled |
duke@435 | 1463 | // during the copy/update phase.) |
duke@435 | 1464 | // |
duke@435 | 1465 | // The size of the dead space to the right of the boundary is not a |
duke@435 | 1466 | // concern, since compaction will be able to use whatever space is |
duke@435 | 1467 | // available. |
duke@435 | 1468 | // |
duke@435 | 1469 | // Here '||' is the boundary, 'x' represents a don't-care bit, and a box
duke@435 | 1470 | // surrounds the space to be filled with an object. |
duke@435 | 1471 | // |
duke@435 | 1472 | // In the 32-bit VM, each bit represents two 32-bit words: |
duke@435 | 1473 | // +---+ |
duke@435 | 1474 | // a) beg_bits: ... x x x | 0 | || 0 x x ... |
duke@435 | 1475 | // end_bits: ... x x x | 0 | || 0 x x ... |
duke@435 | 1476 | // +---+ |
duke@435 | 1477 | // |
duke@435 | 1478 | // In the 64-bit VM, each bit represents one 64-bit word: |
duke@435 | 1479 | // +------------+ |
duke@435 | 1480 | // b) beg_bits: ... x x x | 0 || 0 | x x ... |
duke@435 | 1481 | // end_bits: ... x x 1 | 0 || 0 | x x ... |
duke@435 | 1482 | // +------------+ |
duke@435 | 1483 | // +-------+ |
duke@435 | 1484 | // c) beg_bits: ... x x | 0 0 | || 0 x x ... |
duke@435 | 1485 | // end_bits: ... x 1 | 0 0 | || 0 x x ... |
duke@435 | 1486 | // +-------+ |
duke@435 | 1487 | // +-----------+ |
duke@435 | 1488 | // d) beg_bits: ... x | 0 0 0 | || 0 x x ... |
duke@435 | 1489 | // end_bits: ... 1 | 0 0 0 | || 0 x x ... |
duke@435 | 1490 | // +-----------+ |
duke@435 | 1491 | // +-------+ |
duke@435 | 1492 | // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ... |
duke@435 | 1493 | // end_bits: ... 0 0 | 0 0 | || 0 x x ... |
duke@435 | 1494 | // +-------+ |
duke@435 | 1495 | |
duke@435 | 1496 | // Initially assume case a, c or e will apply. |
duke@435 | 1497 | size_t obj_len = (size_t)oopDesc::header_size(); |
duke@435 | 1498 | HeapWord* obj_beg = dense_prefix_end - obj_len; |
duke@435 | 1499 | |
duke@435 | 1500 | #ifdef _LP64 |
duke@435 | 1501 | if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) { |
duke@435 | 1502 | // Case b above. |
duke@435 | 1503 | obj_beg = dense_prefix_end - 1; |
duke@435 | 1504 | } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) && |
duke@435 | 1505 | _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) { |
duke@435 | 1506 | // Case d above. |
duke@435 | 1507 | obj_beg = dense_prefix_end - 3; |
duke@435 | 1508 | obj_len = 3; |
duke@435 | 1509 | } |
duke@435 | 1510 | #endif // #ifdef _LP64 |
duke@435 | 1511 | |
duke@435 | 1512 | MemRegion region(obj_beg, obj_len); |
duke@435 | 1513 | SharedHeap::fill_region_with_object(region); |
duke@435 | 1514 | _mark_bitmap.mark_obj(obj_beg, obj_len); |
duke@435 | 1515 | _summary_data.add_obj(obj_beg, obj_len); |
duke@435 | 1516 | assert(start_array(id) != NULL, "sanity"); |
duke@435 | 1517 | start_array(id)->allocate_block(obj_beg); |
duke@435 | 1518 | } |
duke@435 | 1519 | } |
duke@435 | 1520 | |
duke@435 | 1521 | void |
duke@435 | 1522 | PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction) |
duke@435 | 1523 | { |
duke@435 | 1524 | assert(id < last_space_id, "id out of range"); |
duke@435 | 1525 | |
duke@435 | 1526 | const MutableSpace* space = _space_info[id].space(); |
duke@435 | 1527 | HeapWord** new_top_addr = _space_info[id].new_top_addr(); |
duke@435 | 1528 | |
duke@435 | 1529 | HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction); |
duke@435 | 1530 | _space_info[id].set_dense_prefix(dense_prefix_end); |
duke@435 | 1531 | |
duke@435 | 1532 | #ifndef PRODUCT |
duke@435 | 1533 | if (TraceParallelOldGCDensePrefix) { |
duke@435 | 1534 | print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end); |
duke@435 | 1535 | HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction); |
duke@435 | 1536 | print_dense_prefix_stats("density", id, maximum_compaction, addr); |
duke@435 | 1537 | } |
duke@435 | 1538 | #endif // #ifndef PRODUCT |
duke@435 | 1539 | |
duke@435 | 1540 | // If dead space crosses the dense prefix boundary, it is (at least partially) |
duke@435 | 1541 | // filled with a dummy object, marked live and added to the summary data. |
duke@435 | 1542 | // This simplifies the copy/update phase and must be done before the final |
duke@435 | 1543 | // locations of objects are determined, to prevent leaving a fragment of dead |
duke@435 | 1544 | // space that is too small to fill with an object. |
duke@435 | 1545 | if (!maximum_compaction && dense_prefix_end != space->bottom()) { |
duke@435 | 1546 | fill_dense_prefix_end(id); |
duke@435 | 1547 | } |
duke@435 | 1548 | |
duke@435 | 1549 | // Compute the destination of each Chunk, and thus each object. |
duke@435 | 1550 | _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); |
duke@435 | 1551 | _summary_data.summarize(dense_prefix_end, space->end(), |
duke@435 | 1552 | dense_prefix_end, space->top(), |
duke@435 | 1553 | new_top_addr); |
duke@435 | 1554 | |
duke@435 | 1555 | if (TraceParallelOldGCSummaryPhase) { |
duke@435 | 1556 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 1557 | const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end); |
duke@435 | 1558 | const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom()); |
duke@435 | 1559 | const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr); |
duke@435 | 1560 | const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end); |
duke@435 | 1561 | tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " " |
duke@435 | 1562 | "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " |
duke@435 | 1563 | "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, |
duke@435 | 1564 | id, space->capacity_in_words(), dense_prefix_end, |
duke@435 | 1565 | dp_chunk, dp_words / chunk_size, |
duke@435 | 1566 | cr_words / chunk_size, *new_top_addr); |
duke@435 | 1567 | } |
duke@435 | 1568 | } |
duke@435 | 1569 | |
duke@435 | 1570 | void PSParallelCompact::summary_phase(ParCompactionManager* cm, |
duke@435 | 1571 | bool maximum_compaction) |
duke@435 | 1572 | { |
duke@435 | 1573 | EventMark m("2 summarize"); |
duke@435 | 1574 | TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); |
duke@435 | 1575 | // trace("2"); |
duke@435 | 1576 | |
duke@435 | 1577 | #ifdef ASSERT |
duke@435 | 1578 | if (VerifyParallelOldWithMarkSweep && |
duke@435 | 1579 | (PSParallelCompact::total_invocations() % |
duke@435 | 1580 | VerifyParallelOldWithMarkSweepInterval) == 0) { |
duke@435 | 1581 | verify_mark_bitmap(_mark_bitmap); |
duke@435 | 1582 | } |
duke@435 | 1583 | if (TraceParallelOldGCMarkingPhase) { |
duke@435 | 1584 | tty->print_cr("add_obj_count=" SIZE_FORMAT " " |
duke@435 | 1585 | "add_obj_bytes=" SIZE_FORMAT, |
duke@435 | 1586 | add_obj_count, add_obj_size * HeapWordSize); |
duke@435 | 1587 | tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " " |
duke@435 | 1588 | "mark_bitmap_bytes=" SIZE_FORMAT, |
duke@435 | 1589 | mark_bitmap_count, mark_bitmap_size * HeapWordSize); |
duke@435 | 1590 | } |
duke@435 | 1591 | #endif // #ifdef ASSERT |
duke@435 | 1592 | |
duke@435 | 1593 | // Quick summarization of each space into itself, to see how much is live. |
duke@435 | 1594 | summarize_spaces_quick(); |
duke@435 | 1595 | |
duke@435 | 1596 | if (TraceParallelOldGCSummaryPhase) { |
duke@435 | 1597 | tty->print_cr("summary_phase: after summarizing each space to self"); |
duke@435 | 1598 | Universe::print(); |
duke@435 | 1599 | NOT_PRODUCT(print_chunk_ranges()); |
duke@435 | 1600 | if (Verbose) { |
duke@435 | 1601 | NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); |
duke@435 | 1602 | } |
duke@435 | 1603 | } |
duke@435 | 1604 | |
duke@435 | 1605 | // The amount of live data that will end up in old space (assuming it fits). |
duke@435 | 1606 | size_t old_space_total_live = 0; |
duke@435 | 1607 | unsigned int id; |
duke@435 | 1608 | for (id = old_space_id; id < last_space_id; ++id) { |
duke@435 | 1609 | old_space_total_live += pointer_delta(_space_info[id].new_top(), |
duke@435 | 1610 | _space_info[id].space()->bottom()); |
duke@435 | 1611 | } |
duke@435 | 1612 | |
duke@435 | 1613 | const MutableSpace* old_space = _space_info[old_space_id].space(); |
duke@435 | 1614 | if (old_space_total_live > old_space->capacity_in_words()) { |
duke@435 | 1615 | // XXX - should also try to expand |
duke@435 | 1616 | maximum_compaction = true; |
duke@435 | 1617 | } else if (!UseParallelOldGCDensePrefix) { |
duke@435 | 1618 | maximum_compaction = true; |
duke@435 | 1619 | } |
duke@435 | 1620 | |
duke@435 | 1621 | // Permanent and Old generations. |
duke@435 | 1622 | summarize_space(perm_space_id, maximum_compaction); |
duke@435 | 1623 | summarize_space(old_space_id, maximum_compaction); |
duke@435 | 1624 | |
duke@435 | 1625 | // Summarize the remaining spaces (those in the young gen) into old space. If |
duke@435 | 1626 | // the live data from a space doesn't fit, the existing summarization is left |
duke@435 | 1627 | // intact, so the data is compacted down within the space itself. |
duke@435 | 1628 | HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr(); |
duke@435 | 1629 | HeapWord* const target_space_end = old_space->end(); |
duke@435 | 1630 | for (id = eden_space_id; id < last_space_id; ++id) { |
duke@435 | 1631 | const MutableSpace* space = _space_info[id].space(); |
duke@435 | 1632 | const size_t live = pointer_delta(_space_info[id].new_top(), |
duke@435 | 1633 | space->bottom()); |
duke@435 | 1634 | const size_t available = pointer_delta(target_space_end, *new_top_addr); |
duke@435 | 1635 | if (live <= available) { |
duke@435 | 1636 | // All the live data will fit. |
duke@435 | 1637 | if (TraceParallelOldGCSummaryPhase) { |
duke@435 | 1638 | tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT, |
duke@435 | 1639 | id, *new_top_addr); |
duke@435 | 1640 | } |
duke@435 | 1641 | _summary_data.summarize(*new_top_addr, target_space_end, |
duke@435 | 1642 | space->bottom(), space->top(), |
duke@435 | 1643 | new_top_addr); |
duke@435 | 1644 | |
duke@435 | 1645 | // Reset the new_top value for the space. |
duke@435 | 1646 | _space_info[id].set_new_top(space->bottom()); |
duke@435 | 1647 | |
duke@435 | 1648 | // Clear the source_chunk field for each chunk in the space. |
duke@435 | 1649 | ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom()); |
duke@435 | 1650 | ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1); |
duke@435 | 1651 | while (beg_chunk <= end_chunk) { |
duke@435 | 1652 | beg_chunk->set_source_chunk(0); |
duke@435 | 1653 | ++beg_chunk; |
duke@435 | 1654 | } |
duke@435 | 1655 | } |
duke@435 | 1656 | } |
duke@435 | 1657 | |
duke@435 | 1658 | // Fill in the block data after any changes to the chunks have |
duke@435 | 1659 | // been made. |
duke@435 | 1660 | #ifdef ASSERT |
duke@435 | 1661 | summarize_blocks(cm, perm_space_id); |
duke@435 | 1662 | summarize_blocks(cm, old_space_id); |
duke@435 | 1663 | #else |
duke@435 | 1664 | if (!UseParallelOldGCChunkPointerCalc) { |
duke@435 | 1665 | summarize_blocks(cm, perm_space_id); |
duke@435 | 1666 | summarize_blocks(cm, old_space_id); |
duke@435 | 1667 | } |
duke@435 | 1668 | #endif |
duke@435 | 1669 | |
duke@435 | 1670 | if (TraceParallelOldGCSummaryPhase) { |
duke@435 | 1671 | tty->print_cr("summary_phase: after final summarization"); |
duke@435 | 1672 | Universe::print(); |
duke@435 | 1673 | NOT_PRODUCT(print_chunk_ranges()); |
duke@435 | 1674 | if (Verbose) { |
duke@435 | 1675 | NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info)); |
duke@435 | 1676 | } |
duke@435 | 1677 | } |
duke@435 | 1678 | } |
duke@435 | 1679 | |
duke@435 | 1680 | // Fill in the BlockData. |
duke@435 | 1681 | // Iterate over the spaces and within each space iterate over |
duke@435 | 1682 | // the chunks and fill in the BlockData for each chunk. |
duke@435 | 1683 | |
duke@435 | 1684 | void PSParallelCompact::summarize_blocks(ParCompactionManager* cm, |
duke@435 | 1685 | SpaceId first_compaction_space_id) { |
duke@435 | 1686 | #if 0 |
duke@435 | 1687 | DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);) |
duke@435 | 1688 | for (SpaceId cur_space_id = first_compaction_space_id; |
duke@435 | 1689 | cur_space_id != last_space_id; |
duke@435 | 1690 | cur_space_id = next_compaction_space_id(cur_space_id)) { |
duke@435 | 1691 | // Iterate over the chunks in the space |
duke@435 | 1692 | size_t start_chunk_index = |
duke@435 | 1693 | _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom()); |
duke@435 | 1694 | BitBlockUpdateClosure bbu(mark_bitmap(), |
duke@435 | 1695 | cm, |
duke@435 | 1696 | start_chunk_index); |
duke@435 | 1697 | // Iterate over blocks. |
duke@435 | 1698 | for (size_t chunk_index = start_chunk_index; |
duke@435 | 1699 | chunk_index < _summary_data.chunk_count() && |
duke@435 | 1700 | _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top(); |
duke@435 | 1701 | chunk_index++) { |
duke@435 | 1702 | |
duke@435 | 1703 | // Reset the closure for the new chunk. Note that the closure |
duke@435 | 1704 | // maintains some data that does not get reset for each chunk,
duke@435 | 1705 | // so a new instance of the closure is not appropriate.
duke@435 | 1706 | bbu.reset_chunk(chunk_index); |
duke@435 | 1707 | |
duke@435 | 1708 | // Start the iteration with the first live object. This |
duke@435 | 1709 | // may return the end of the chunk. That is acceptable since |
duke@435 | 1710 | // it will properly limit the iterations. |
duke@435 | 1711 | ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit( |
duke@435 | 1712 | _summary_data.first_live_or_end_in_chunk(chunk_index)); |
duke@435 | 1713 | |
duke@435 | 1714 | // End the iteration at the end of the chunk. |
duke@435 | 1715 | HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index); |
duke@435 | 1716 | HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize; |
duke@435 | 1717 | ParMarkBitMap::idx_t right_offset = |
duke@435 | 1718 | mark_bitmap()->addr_to_bit(chunk_end); |
duke@435 | 1719 | |
duke@435 | 1720 | // Blocks that have no objects starting in them can be
duke@435 | 1721 | // skipped because their data will never be used. |
duke@435 | 1722 | if (left_offset < right_offset) { |
duke@435 | 1723 | |
duke@435 | 1724 | // Iterate through the objects in the chunk. |
duke@435 | 1725 | ParMarkBitMap::idx_t last_offset = |
duke@435 | 1726 | mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset); |
duke@435 | 1727 | |
duke@435 | 1728 | // If last_offset is less than right_offset, then the iteration
duke@435 | 1729 | // terminated while looking for an end bit. "last_offset"
duke@435 | 1730 | // is then the offset of the last start bit. In this situation
duke@435 | 1731 | // the "offset" field for the next block to the right (_cur_block + 1)
duke@435 | 1732 | // will not have been updated although there may be live data
duke@435 | 1733 | // to the left of the chunk. |
duke@435 | 1734 | |
duke@435 | 1735 | size_t cur_block_plus_1 = bbu.cur_block() + 1; |
duke@435 | 1736 | HeapWord* cur_block_plus_1_addr = |
duke@435 | 1737 | _summary_data.block_to_addr(bbu.cur_block()) + |
duke@435 | 1738 | ParallelCompactData::BlockSize; |
duke@435 | 1739 | HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset); |
duke@435 | 1740 | #if 1 // This code works. The else branch should work too, but doesn't. Why not?
duke@435 | 1741 | // The current block (cur_block()) has already been updated. |
duke@435 | 1742 | // The last block that may need to be updated is either the |
duke@435 | 1743 | // next block (current block + 1) or the block where the |
duke@435 | 1744 | // last object starts (which can be greater than the |
duke@435 | 1745 | // next block if there were no objects found in intervening |
duke@435 | 1746 | // blocks). |
duke@435 | 1747 | size_t last_block = |
duke@435 | 1748 | MAX2(bbu.cur_block() + 1, |
duke@435 | 1749 | _summary_data.addr_to_block_idx(last_offset_addr)); |
duke@435 | 1750 | #else |
duke@435 | 1751 | // The current block has already been updated. The only block |
duke@435 | 1752 | // that remains to be updated is the block where the last |
duke@435 | 1753 | // object in the chunk starts. |
duke@435 | 1754 | size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr); |
duke@435 | 1755 | #endif |
duke@435 | 1756 | assert_bit_is_start(last_offset); |
duke@435 | 1757 | assert((last_block == _summary_data.block_count()) || |
duke@435 | 1758 | (_summary_data.block(last_block)->raw_offset() == 0), |
duke@435 | 1759 | "Should not have been set"); |
duke@435 | 1760 | // Is the last block still in the current chunk? If still |
duke@435 | 1761 | // in this chunk, update the last block (the counting that |
duke@435 | 1762 | // included the current block is meant for the offset of the last |
duke@435 | 1763 | // block). If not in this chunk, do nothing. Should not |
duke@435 | 1764 | // update a block in the next chunk. |
duke@435 | 1765 | if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(), |
duke@435 | 1766 | last_block)) { |
duke@435 | 1767 | if (last_offset < right_offset) { |
duke@435 | 1768 | // The last object started in this chunk but ends beyond |
duke@435 | 1769 | // this chunk. Update the block for this last object. |
duke@435 | 1770 | assert(mark_bitmap()->is_marked(last_offset), "Should be marked"); |
duke@435 | 1771 | // No end bit was found. The closure takes care of |
duke@435 | 1772 | // the cases where |
duke@435 | 1773 | // an object crosses over into the next block
duke@435 | 1774 | // an object starts and ends in the next block
duke@435 | 1775 | // It does not handle the case where an object is |
duke@435 | 1776 | // the first object in a later block and extends |
duke@435 | 1777 | // past the end of the chunk (i.e., the closure |
duke@435 | 1778 | // only handles complete objects that are in the range |
duke@435 | 1779 | // it is given). That object is handed back here |
duke@435 | 1780 | // for any special consideration necessary. |
duke@435 | 1781 | // |
duke@435 | 1782 | // Is the first bit in the last block a start or end bit? |
duke@435 | 1783 | // |
duke@435 | 1784 | // If the partial object ends in the last block L, |
duke@435 | 1785 | // then the 1st bit in L may be an end bit. |
duke@435 | 1786 | // |
duke@435 | 1787 | // Else does the last object start in a block after the current |
duke@435 | 1788 | // block? A block AA will already have been updated if an |
duke@435 | 1789 | // object ends in the next block AA+1. An object found to end in |
duke@435 | 1790 | // AA+1 is the trigger that updates AA. Objects are being
duke@435 | 1791 | // counted in the current block for updating a following
duke@435 | 1792 | // block. An object may start in a later block
duke@435 | 1793 | // but may extend beyond the last block in the chunk.
duke@435 | 1794 | // Updates are only done when the end of an object has been |
duke@435 | 1795 | // found. If the last object (covered by block L) starts |
duke@435 | 1796 | // beyond the current block, then no object ends in L (otherwise |
duke@435 | 1797 | // L would be the current block). So the first bit in L is |
duke@435 | 1798 | // a start bit. |
duke@435 | 1799 | // |
duke@435 | 1800 | // Else the last object starts in the current block and ends
duke@435 | 1801 | // beyond the chunk. The current block has already been |
duke@435 | 1802 | // updated and there is no later block (with an object |
duke@435 | 1803 | // starting in it) that needs to be updated. |
duke@435 | 1804 | // |
duke@435 | 1805 | if (_summary_data.partial_obj_ends_in_block(last_block)) { |
duke@435 | 1806 | _summary_data.block(last_block)->set_end_bit_offset( |
duke@435 | 1807 | bbu.live_data_left()); |
duke@435 | 1808 | } else if (last_offset_addr >= cur_block_plus_1_addr) { |
duke@435 | 1809 | // The start of the object is in a later block
duke@435 | 1810 | // (to the right of the current block), and there are no
duke@435 | 1811 | // complete live objects to the left of this last object
duke@435 | 1812 | // within the chunk.
duke@435 | 1813 | // The first bit in the block is for the start of the |
duke@435 | 1814 | // last object. |
duke@435 | 1815 | _summary_data.block(last_block)->set_start_bit_offset( |
duke@435 | 1816 | bbu.live_data_left()); |
duke@435 | 1817 | } else { |
duke@435 | 1818 | // The start of the last object was found in |
duke@435 | 1819 | // the current block (which has already
duke@435 | 1820 | // been updated). |
duke@435 | 1821 | assert(bbu.cur_block() == |
duke@435 | 1822 | _summary_data.addr_to_block_idx(last_offset_addr), |
duke@435 | 1823 | "Should be a block already processed"); |
duke@435 | 1824 | } |
duke@435 | 1825 | #ifdef ASSERT |
duke@435 | 1826 | // Is there enough block information to find this object? |
duke@435 | 1827 | // The destination of the chunk has not been set, so the
duke@435 | 1828 | // values returned by chunk_calc_new_pointer() and
duke@435 | 1829 | // calc_new_pointer() will only be
duke@435 | 1830 | // offsets. But they should agree.
duke@435 | 1831 | HeapWord* moved_obj_with_chunks = |
duke@435 | 1832 | _summary_data.chunk_calc_new_pointer(last_offset_addr); |
duke@435 | 1833 | HeapWord* moved_obj_with_blocks = |
duke@435 | 1834 | _summary_data.calc_new_pointer(last_offset_addr); |
duke@435 | 1835 | assert(moved_obj_with_chunks == moved_obj_with_blocks, |
duke@435 | 1836 | "Block calculation is wrong"); |
duke@435 | 1837 | #endif |
duke@435 | 1838 | } else if (last_block < _summary_data.block_count()) { |
duke@435 | 1839 | // Iterations ended looking for a start bit (but |
duke@435 | 1840 | // did not run off the end of the block table). |
duke@435 | 1841 | _summary_data.block(last_block)->set_start_bit_offset( |
duke@435 | 1842 | bbu.live_data_left()); |
duke@435 | 1843 | } |
duke@435 | 1844 | } |
duke@435 | 1845 | #ifdef ASSERT |
duke@435 | 1846 | // Is there enough block information to find this object? |
duke@435 | 1847 | HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset); |
duke@435 | 1848 | HeapWord* moved_obj_with_chunks = |
duke@435 | 1849 | _summary_data.chunk_calc_new_pointer(left_offset_addr);
duke@435 | 1850 | HeapWord* moved_obj_with_blocks = |
duke@435 | 1851 | _summary_data.calc_new_pointer(left_offset_addr); |
duke@435 | 1852 | assert(moved_obj_with_chunks == moved_obj_with_blocks, |
duke@435 | 1853 | "Block calculation is wrong"); |
duke@435 | 1854 | #endif |
duke@435 | 1855 | |
duke@435 | 1856 | // Is there another block after the end of this chunk? |
duke@435 | 1857 | #ifdef ASSERT |
duke@435 | 1858 | if (last_block < _summary_data.block_count()) { |
duke@435 | 1859 | // No object may have been found in a block. If that |
duke@435 | 1860 | // block is at the end of the chunk, the iteration will |
duke@435 | 1861 | // terminate without incrementing the current block, so
duke@435 | 1862 | // the current block may not be the last block in the
duke@435 | 1863 | // chunk. That situation precludes asserting that the
duke@435 | 1864 | // current block is the last block in the chunk. Assert
duke@435 | 1865 | // the weaker condition that the current block does not
duke@435 | 1866 | // exceed the chunk. |
duke@435 | 1867 | assert(_summary_data.block_to_addr(last_block) <= |
duke@435 | 1868 | (_summary_data.chunk_to_addr(chunk_index) + |
duke@435 | 1869 | ParallelCompactData::ChunkSize), |
duke@435 | 1870 | "Chunk and block inconsistency"); |
duke@435 | 1871 | assert(last_offset <= right_offset, "Iteration overran end");
duke@435 | 1872 | } |
duke@435 | 1873 | #endif |
duke@435 | 1874 | } |
duke@435 | 1875 | #ifdef ASSERT |
duke@435 | 1876 | if (PrintGCDetails && Verbose) { |
duke@435 | 1877 | if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) { |
duke@435 | 1878 | size_t first_block = |
duke@435 | 1879 | chunk_index / ParallelCompactData::BlocksPerChunk; |
duke@435 | 1880 | gclog_or_tty->print_cr("first_block " PTR_FORMAT |
duke@435 | 1881 | " _offset " PTR_FORMAT |
duke@435 | 1882 | " _first_is_start_bit %d",
duke@435 | 1883 | first_block, |
duke@435 | 1884 | _summary_data.block(first_block)->raw_offset(), |
duke@435 | 1885 | _summary_data.block(first_block)->first_is_start_bit()); |
duke@435 | 1886 | } |
duke@435 | 1887 | } |
duke@435 | 1888 | #endif |
duke@435 | 1889 | } |
duke@435 | 1890 | } |
duke@435 | 1891 | DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);) |
duke@435 | 1892 | #endif // #if 0 |
duke@435 | 1893 | } |
duke@435 | 1894 | |
duke@435 | 1895 | // This method should contain all heap-specific policy for invoking a full |
duke@435 | 1896 | // collection. invoke_no_policy() will only attempt to compact the heap; it |
duke@435 | 1897 | // will do nothing further. If we need to bail out for policy reasons, scavenge |
duke@435 | 1898 | // before full gc, or any other specialized behavior, it needs to be added here. |
duke@435 | 1899 | // |
duke@435 | 1900 | // Note that this method should only be called from the vm_thread while at a |
duke@435 | 1901 | // safepoint. |
duke@435 | 1902 | void PSParallelCompact::invoke(bool maximum_heap_compaction) { |
duke@435 | 1903 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
duke@435 | 1904 | assert(Thread::current() == (Thread*)VMThread::vm_thread(), |
duke@435 | 1905 | "should be in vm thread"); |
duke@435 | 1906 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 1907 | GCCause::Cause gc_cause = heap->gc_cause(); |
duke@435 | 1908 | assert(!heap->is_gc_active(), "not reentrant"); |
duke@435 | 1909 | |
duke@435 | 1910 | PSAdaptiveSizePolicy* policy = heap->size_policy(); |
duke@435 | 1911 | |
duke@435 | 1912 | // Before each allocation/collection attempt, find out from the |
duke@435 | 1913 | // policy object if GCs are, on the whole, taking too long. If so, |
duke@435 | 1914 | // bail out without attempting a collection. The exceptions are |
duke@435 | 1915 | // for explicitly requested GCs.
duke@435 | 1916 | if (!policy->gc_time_limit_exceeded() || |
duke@435 | 1917 | GCCause::is_user_requested_gc(gc_cause) || |
duke@435 | 1918 | GCCause::is_serviceability_requested_gc(gc_cause)) { |
duke@435 | 1919 | IsGCActiveMark mark; |
duke@435 | 1920 | |
duke@435 | 1921 | if (ScavengeBeforeFullGC) { |
duke@435 | 1922 | PSScavenge::invoke_no_policy(); |
duke@435 | 1923 | } |
duke@435 | 1924 | |
duke@435 | 1925 | PSParallelCompact::invoke_no_policy(maximum_heap_compaction); |
duke@435 | 1926 | } |
duke@435 | 1927 | } |
duke@435 | 1928 | |
duke@435 | 1929 | bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) { |
duke@435 | 1930 | size_t addr_chunk_index = addr_to_chunk_idx(addr); |
duke@435 | 1931 | return chunk_index == addr_chunk_index; |
duke@435 | 1932 | } |
duke@435 | 1933 | |
duke@435 | 1934 | bool ParallelCompactData::chunk_contains_block(size_t chunk_index, |
duke@435 | 1935 | size_t block_index) { |
duke@435 | 1936 | size_t first_block_in_chunk = chunk_index * BlocksPerChunk; |
duke@435 | 1937 | size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1; |
duke@435 | 1938 | |
duke@435 | 1939 | return (first_block_in_chunk <= block_index) && |
duke@435 | 1940 | (block_index <= last_block_in_chunk); |
duke@435 | 1941 | } |
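// Each chunk covers BlocksPerChunk consecutive blocks, so chunk N covers
// block indices [N * BlocksPerChunk, (N + 1) * BlocksPerChunk).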
duke@435 | 1942 | |
duke@435 | 1943 | // This method contains no policy. You should probably |
duke@435 | 1944 | // be calling invoke() instead. |
duke@435 | 1945 | void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { |
duke@435 | 1946 | assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); |
duke@435 | 1947 | assert(ref_processor() != NULL, "Sanity"); |
duke@435 | 1948 | |
apetrusenko@574 | 1949 | if (GC_locker::check_active_before_gc()) { |
duke@435 | 1950 | return; |
duke@435 | 1951 | } |
duke@435 | 1952 | |
duke@435 | 1953 | TimeStamp marking_start; |
duke@435 | 1954 | TimeStamp compaction_start; |
duke@435 | 1955 | TimeStamp collection_exit; |
duke@435 | 1956 | |
duke@435 | 1957 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 1958 | GCCause::Cause gc_cause = heap->gc_cause(); |
duke@435 | 1959 | PSYoungGen* young_gen = heap->young_gen(); |
duke@435 | 1960 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 1961 | PSPermGen* perm_gen = heap->perm_gen(); |
duke@435 | 1962 | PSAdaptiveSizePolicy* size_policy = heap->size_policy(); |
duke@435 | 1963 | |
duke@435 | 1964 | _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes; |
duke@435 | 1965 | |
duke@435 | 1966 | // Make sure data structures are sane, make the heap parsable, and do other |
duke@435 | 1967 | // miscellaneous bookkeeping. |
duke@435 | 1968 | PreGCValues pre_gc_values; |
duke@435 | 1969 | pre_compact(&pre_gc_values); |
duke@435 | 1970 | |
jcoomes@645 | 1971 | // Get the compaction manager reserved for the VM thread. |
jcoomes@645 | 1972 | ParCompactionManager* const vmthread_cm = |
jcoomes@645 | 1973 | ParCompactionManager::manager_array(gc_task_manager()->workers()); |
jcoomes@645 | 1974 | |
duke@435 | 1975 | // Place after pre_compact() where the number of invocations is incremented. |
duke@435 | 1976 | AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); |
duke@435 | 1977 | |
duke@435 | 1978 | { |
duke@435 | 1979 | ResourceMark rm; |
duke@435 | 1980 | HandleMark hm; |
duke@435 | 1981 | |
duke@435 | 1982 | const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc; |
duke@435 | 1983 | |
duke@435 | 1984 | // This is useful for debugging but don't change the output the |
duke@435 | 1985 | // customer sees.
duke@435 | 1986 | const char* gc_cause_str = "Full GC"; |
duke@435 | 1987 | if (is_system_gc && PrintGCDetails) { |
duke@435 | 1988 | gc_cause_str = "Full GC (System)"; |
duke@435 | 1989 | } |
duke@435 | 1990 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
duke@435 | 1991 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
duke@435 | 1992 | TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty); |
duke@435 | 1993 | TraceCollectorStats tcs(counters()); |
duke@435 | 1994 | TraceMemoryManagerStats tms(true /* Full GC */); |
duke@435 | 1995 | |
duke@435 | 1996 | if (TraceGen1Time) accumulated_time()->start(); |
duke@435 | 1997 | |
duke@435 | 1998 | // Let the size policy know we're starting |
duke@435 | 1999 | size_policy->major_collection_begin(); |
duke@435 | 2000 | |
duke@435 | 2001 | // When collecting the permanent generation, methodOops may be moving,
duke@435 | 2002 | // so we either have to flush all bcp data or convert it into bci. |
duke@435 | 2003 | CodeCache::gc_prologue(); |
duke@435 | 2004 | Threads::gc_prologue(); |
duke@435 | 2005 | |
duke@435 | 2006 | NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
duke@435 | 2007 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
duke@435 | 2008 | |
duke@435 | 2009 | ref_processor()->enable_discovery(); |
duke@435 | 2010 | |
duke@435 | 2011 | bool marked_for_unloading = false; |
duke@435 | 2012 | |
duke@435 | 2013 | marking_start.update(); |
jcoomes@645 | 2014 | marking_phase(vmthread_cm, maximum_heap_compaction); |
duke@435 | 2015 | |
duke@435 | 2016 | #ifndef PRODUCT |
duke@435 | 2017 | if (TraceParallelOldGCMarkingPhase) { |
duke@435 | 2018 | gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d " |
duke@435 | 2019 | "cas_by_another %d", |
duke@435 | 2020 | mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(), |
duke@435 | 2021 | mark_bitmap()->cas_by_another()); |
duke@435 | 2022 | } |
duke@435 | 2023 | #endif // #ifndef PRODUCT |
duke@435 | 2024 | |
duke@435 | 2025 | #ifdef ASSERT |
duke@435 | 2026 | if (VerifyParallelOldWithMarkSweep && |
duke@435 | 2027 | (PSParallelCompact::total_invocations() % |
duke@435 | 2028 | VerifyParallelOldWithMarkSweepInterval) == 0) { |
duke@435 | 2029 | gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()"); |
duke@435 | 2030 | if (PrintGCDetails && Verbose) { |
duke@435 | 2031 | gclog_or_tty->print_cr("mark_sweep_phase1:"); |
duke@435 | 2032 | } |
duke@435 | 2033 | // Clear the discovered lists so that discovered objects |
duke@435 | 2034 | // don't look like they have been discovered twice. |
duke@435 | 2035 | ref_processor()->clear_discovered_references(); |
duke@435 | 2036 | |
duke@435 | 2037 | PSMarkSweep::allocate_stacks(); |
duke@435 | 2038 | MemRegion mr = Universe::heap()->reserved_region(); |
duke@435 | 2039 | PSMarkSweep::ref_processor()->enable_discovery(); |
duke@435 | 2040 | PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction); |
duke@435 | 2041 | } |
duke@435 | 2042 | #endif |
duke@435 | 2043 | |
duke@435 | 2044 | bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc; |
jcoomes@645 | 2045 | summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc); |
duke@435 | 2046 | |
duke@435 | 2047 | #ifdef ASSERT |
duke@435 | 2048 | if (VerifyParallelOldWithMarkSweep && |
duke@435 | 2049 | (PSParallelCompact::total_invocations() % |
duke@435 | 2050 | VerifyParallelOldWithMarkSweepInterval) == 0) { |
duke@435 | 2051 | if (PrintGCDetails && Verbose) { |
duke@435 | 2052 | gclog_or_tty->print_cr("mark_sweep_phase2:"); |
duke@435 | 2053 | } |
duke@435 | 2054 | PSMarkSweep::mark_sweep_phase2(); |
duke@435 | 2055 | } |
duke@435 | 2056 | #endif |
duke@435 | 2057 | |
duke@435 | 2058 | COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); |
duke@435 | 2059 | COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); |
duke@435 | 2060 | |
duke@435 | 2061 | // adjust_roots() updates Universe::_intArrayKlassObj which is |
duke@435 | 2062 | // needed by the compaction for filling holes in the dense prefix. |
duke@435 | 2063 | adjust_roots(); |
duke@435 | 2064 | |
duke@435 | 2065 | #ifdef ASSERT |
duke@435 | 2066 | if (VerifyParallelOldWithMarkSweep && |
duke@435 | 2067 | (PSParallelCompact::total_invocations() % |
duke@435 | 2068 | VerifyParallelOldWithMarkSweepInterval) == 0) { |
duke@435 | 2069 | // Do a separate verify phase so that the verify |
duke@435 | 2070 | // code can use the forwarding pointers to |
duke@435 | 2071 | // check the new pointer calculation. The restore_marks() |
duke@435 | 2072 | // has to be done before the real compact. |
jcoomes@645 | 2073 | vmthread_cm->set_action(ParCompactionManager::VerifyUpdate); |
jcoomes@645 | 2074 | compact_perm(vmthread_cm); |
jcoomes@645 | 2075 | compact_serial(vmthread_cm); |
jcoomes@645 | 2076 | vmthread_cm->set_action(ParCompactionManager::ResetObjects); |
jcoomes@645 | 2077 | compact_perm(vmthread_cm); |
jcoomes@645 | 2078 | compact_serial(vmthread_cm); |
jcoomes@645 | 2079 | vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy); |
duke@435 | 2080 | |
duke@435 | 2081 | // For debugging only |
duke@435 | 2082 | PSMarkSweep::restore_marks(); |
duke@435 | 2083 | PSMarkSweep::deallocate_stacks(); |
duke@435 | 2084 | } |
duke@435 | 2085 | #endif |
duke@435 | 2086 | |
duke@435 | 2087 | compaction_start.update(); |
duke@435 | 2088 | // Does the perm gen always have to be done serially because |
duke@435 | 2089 | // klasses are used in the update of an object? |
jcoomes@645 | 2090 | compact_perm(vmthread_cm); |
duke@435 | 2091 | |
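// UseParallelOldGCCompacting selects the parallel, chunk-based compaction
// below; otherwise each space is compacted serially by this (the VM)
// thread via compact_serial().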
duke@435 | 2092 | if (UseParallelOldGCCompacting) { |
duke@435 | 2093 | compact(); |
duke@435 | 2094 | } else { |
jcoomes@645 | 2095 | compact_serial(vmthread_cm); |
duke@435 | 2096 | } |
duke@435 | 2097 | |
duke@435 | 2098 | // Reset the mark bitmap, summary data, and do other bookkeeping. Must be |
duke@435 | 2099 | // done before resizing. |
duke@435 | 2100 | post_compact(); |
duke@435 | 2101 | |
duke@435 | 2102 | // Let the size policy know we're done |
duke@435 | 2103 | size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause); |
duke@435 | 2104 | |
duke@435 | 2105 | if (UseAdaptiveSizePolicy) { |
duke@435 | 2106 | if (PrintAdaptiveSizePolicy) { |
duke@435 | 2107 | gclog_or_tty->print("AdaptiveSizeStart: "); |
duke@435 | 2108 | gclog_or_tty->stamp(); |
duke@435 | 2109 | gclog_or_tty->print_cr(" collection: %d ", |
duke@435 | 2110 | heap->total_collections()); |
duke@435 | 2111 | if (Verbose) { |
duke@435 | 2112 | gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT |
duke@435 | 2113 | " young_gen_capacity: " SIZE_FORMAT " perm_gen_capacity: " SIZE_FORMAT " ", |
duke@435 | 2114 | old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(), |
duke@435 | 2115 | perm_gen->capacity_in_bytes()); |
duke@435 | 2116 | } |
duke@435 | 2117 | } |
duke@435 | 2118 | |
duke@435 | 2119 | // Don't check if the size_policy is ready here. Let |
duke@435 | 2120 | // the size_policy check that internally. |
duke@435 | 2121 | if (UseAdaptiveGenerationSizePolicyAtMajorCollection && |
duke@435 | 2122 | ((gc_cause != GCCause::_java_lang_system_gc) || |
duke@435 | 2123 | UseAdaptiveSizePolicyWithSystemGC)) { |
duke@435 | 2124 | // Calculate optimal free space amounts |
duke@435 | 2125 | assert(young_gen->max_size() > |
duke@435 | 2126 | young_gen->from_space()->capacity_in_bytes() + |
duke@435 | 2127 | young_gen->to_space()->capacity_in_bytes(), |
duke@435 | 2128 | "Sizes of space in young gen are out-of-bounds"); |
duke@435 | 2129 | size_t max_eden_size = young_gen->max_size() - |
duke@435 | 2130 | young_gen->from_space()->capacity_in_bytes() - |
duke@435 | 2131 | young_gen->to_space()->capacity_in_bytes(); |
duke@435 | 2132 | size_policy->compute_generation_free_space(young_gen->used_in_bytes(), |
duke@435 | 2133 | young_gen->eden_space()->used_in_bytes(), |
duke@435 | 2134 | old_gen->used_in_bytes(), |
duke@435 | 2135 | perm_gen->used_in_bytes(), |
duke@435 | 2136 | young_gen->eden_space()->capacity_in_bytes(), |
duke@435 | 2137 | old_gen->max_gen_size(), |
duke@435 | 2138 | max_eden_size, |
duke@435 | 2139 | true /* full gc */, |
duke@435 | 2140 | gc_cause); |
duke@435 | 2141 | |
duke@435 | 2142 | heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); |
duke@435 | 2143 | |
duke@435 | 2144 | // Don't resize the young generation at a major collection. A |
duke@435 | 2145 | // desired young generation size may have been calculated but |
duke@435 | 2146 | // resizing the young generation complicates the code because the |
duke@435 | 2147 | // resizing of the old generation may have moved the boundary |
duke@435 | 2148 | // between the young generation and the old generation. Let the |
duke@435 | 2149 | // young generation resizing happen at the minor collections. |
duke@435 | 2150 | } |
duke@435 | 2151 | if (PrintAdaptiveSizePolicy) { |
duke@435 | 2152 | gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ", |
duke@435 | 2153 | heap->total_collections()); |
duke@435 | 2154 | } |
duke@435 | 2155 | } |
duke@435 | 2156 | |
duke@435 | 2157 | if (UsePerfData) { |
duke@435 | 2158 | PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters(); |
duke@435 | 2159 | counters->update_counters(); |
duke@435 | 2160 | counters->update_old_capacity(old_gen->capacity_in_bytes()); |
duke@435 | 2161 | counters->update_young_capacity(young_gen->capacity_in_bytes()); |
duke@435 | 2162 | } |
duke@435 | 2163 | |
duke@435 | 2164 | heap->resize_all_tlabs(); |
duke@435 | 2165 | |
duke@435 | 2166 | // We collected the perm gen, so we'll resize it here. |
duke@435 | 2167 | perm_gen->compute_new_size(pre_gc_values.perm_gen_used()); |
duke@435 | 2168 | |
duke@435 | 2169 | if (TraceGen1Time) accumulated_time()->stop(); |
duke@435 | 2170 | |
duke@435 | 2171 | if (PrintGC) { |
duke@435 | 2172 | if (PrintGCDetails) { |
duke@435 | 2173 | // No GC timestamp here. This is after GC so it would be confusing. |
duke@435 | 2174 | young_gen->print_used_change(pre_gc_values.young_gen_used()); |
duke@435 | 2175 | old_gen->print_used_change(pre_gc_values.old_gen_used()); |
duke@435 | 2176 | heap->print_heap_change(pre_gc_values.heap_used()); |
duke@435 | 2177 | // Print perm gen last (print_heap_change() excludes the perm gen). |
duke@435 | 2178 | perm_gen->print_used_change(pre_gc_values.perm_gen_used()); |
duke@435 | 2179 | } else { |
duke@435 | 2180 | heap->print_heap_change(pre_gc_values.heap_used()); |
duke@435 | 2181 | } |
duke@435 | 2182 | } |
duke@435 | 2183 | |
duke@435 | 2184 | // Track memory usage and detect low memory |
duke@435 | 2185 | MemoryService::track_memory_usage(); |
duke@435 | 2186 | heap->update_counters(); |
duke@435 | 2187 | |
duke@435 | 2188 | if (PrintGCDetails) { |
duke@435 | 2189 | if (size_policy->print_gc_time_limit_would_be_exceeded()) { |
duke@435 | 2190 | if (size_policy->gc_time_limit_exceeded()) { |
duke@435 | 2191 | gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit " |
duke@435 | 2192 | "of %d%%", GCTimeLimit); |
duke@435 | 2193 | } else { |
duke@435 | 2194 | gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit " |
duke@435 | 2195 | "of %d%%", GCTimeLimit); |
duke@435 | 2196 | } |
duke@435 | 2197 | } |
duke@435 | 2198 | size_policy->set_print_gc_time_limit_would_be_exceeded(false); |
duke@435 | 2199 | } |
duke@435 | 2200 | } |
duke@435 | 2201 | |
duke@435 | 2202 | if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { |
duke@435 | 2203 | HandleMark hm; // Discard invalid handles created during verification |
duke@435 | 2204 | gclog_or_tty->print(" VerifyAfterGC:"); |
duke@435 | 2205 | Universe::verify(false); |
duke@435 | 2206 | } |
duke@435 | 2207 | |
duke@435 | 2208 | // Re-verify object start arrays |
duke@435 | 2209 | if (VerifyObjectStartArray && |
duke@435 | 2210 | VerifyAfterGC) { |
duke@435 | 2211 | old_gen->verify_object_start_array(); |
duke@435 | 2212 | perm_gen->verify_object_start_array(); |
duke@435 | 2213 | } |
duke@435 | 2214 | |
duke@435 | 2215 | NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
duke@435 | 2216 | |
duke@435 | 2217 | collection_exit.update(); |
duke@435 | 2218 | |
duke@435 | 2219 | if (PrintHeapAtGC) { |
duke@435 | 2220 | Universe::print_heap_after_gc(); |
duke@435 | 2221 | } |
duke@435 | 2222 | if (PrintGCTaskTimeStamps) { |
duke@435 | 2223 | gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " |
duke@435 | 2224 | INT64_FORMAT, |
duke@435 | 2225 | marking_start.ticks(), compaction_start.ticks(), |
duke@435 | 2226 | collection_exit.ticks()); |
duke@435 | 2227 | gc_task_manager()->print_task_time_stamps(); |
duke@435 | 2228 | } |
duke@435 | 2229 | } |
duke@435 | 2230 | |
duke@435 | 2231 | bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy, |
duke@435 | 2232 | PSYoungGen* young_gen, |
duke@435 | 2233 | PSOldGen* old_gen) { |
duke@435 | 2234 | MutableSpace* const eden_space = young_gen->eden_space(); |
duke@435 | 2235 | assert(!eden_space->is_empty(), "eden must be non-empty"); |
duke@435 | 2236 | assert(young_gen->virtual_space()->alignment() == |
duke@435 | 2237 | old_gen->virtual_space()->alignment(), "alignments do not match"); |
duke@435 | 2238 | |
duke@435 | 2239 | if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) { |
duke@435 | 2240 | return false; |
duke@435 | 2241 | } |
duke@435 | 2242 | |
duke@435 | 2243 | // Both generations must be completely committed. |
duke@435 | 2244 | if (young_gen->virtual_space()->uncommitted_size() != 0) { |
duke@435 | 2245 | return false; |
duke@435 | 2246 | } |
duke@435 | 2247 | if (old_gen->virtual_space()->uncommitted_size() != 0) { |
duke@435 | 2248 | return false; |
duke@435 | 2249 | } |
duke@435 | 2250 | |
duke@435 | 2251 | // Figure out how much to take from eden. Include the average amount promoted |
duke@435 | 2252 | // in the total; otherwise the next young gen GC will simply bail out to a |
duke@435 | 2253 | // full GC. |
duke@435 | 2254 | const size_t alignment = old_gen->virtual_space()->alignment(); |
duke@435 | 2255 | const size_t eden_used = eden_space->used_in_bytes(); |
duke@435 | 2256 | const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average(); |
duke@435 | 2257 | const size_t absorb_size = align_size_up(eden_used + promoted, alignment); |
duke@435 | 2258 | const size_t eden_capacity = eden_space->capacity_in_bytes(); |
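// Illustrative example (hypothetical numbers): with eden_used = 60M, a
// padded promoted average of 4M and a 64K alignment, absorb_size =
// align_size_up(64M, 64K) = 64M. Absorption proceeds only if eden's
// capacity exceeds that (e.g., 96M) and the shrunken young gen stays at
// or above its minimum size.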
duke@435 | 2259 | |
duke@435 | 2260 | if (absorb_size >= eden_capacity) { |
duke@435 | 2261 | return false; // Must leave some space in eden. |
duke@435 | 2262 | } |
duke@435 | 2263 | |
duke@435 | 2264 | const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size; |
duke@435 | 2265 | if (new_young_size < young_gen->min_gen_size()) { |
duke@435 | 2266 | return false; // Respect young gen minimum size. |
duke@435 | 2267 | } |
duke@435 | 2268 | |
duke@435 | 2269 | if (TraceAdaptiveGCBoundary && Verbose) { |
duke@435 | 2270 | gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: " |
duke@435 | 2271 | "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K " |
duke@435 | 2272 | "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K " |
duke@435 | 2273 | "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ", |
duke@435 | 2274 | absorb_size / K, |
duke@435 | 2275 | eden_capacity / K, (eden_capacity - absorb_size) / K, |
duke@435 | 2276 | young_gen->from_space()->used_in_bytes() / K, |
duke@435 | 2277 | young_gen->to_space()->used_in_bytes() / K, |
duke@435 | 2278 | young_gen->capacity_in_bytes() / K, new_young_size / K); |
duke@435 | 2279 | } |
duke@435 | 2280 | |
duke@435 | 2281 | // Fill the unused part of the old gen. |
duke@435 | 2282 | MutableSpace* const old_space = old_gen->object_space(); |
duke@435 | 2283 | MemRegion old_gen_unused(old_space->top(), old_space->end()); |
duke@435 | 2284 | if (!old_gen_unused.is_empty()) { |
duke@435 | 2285 | SharedHeap::fill_region_with_object(old_gen_unused); |
duke@435 | 2286 | } |
duke@435 | 2287 | |
duke@435 | 2288 | // Take the live data from eden and set both top and end in the old gen to |
duke@435 | 2289 | // eden top. (Need to set end because reset_after_change() mangles the region |
duke@435 | 2290 | // from end to virtual_space->high() in debug builds). |
duke@435 | 2291 | HeapWord* const new_top = eden_space->top(); |
duke@435 | 2292 | old_gen->virtual_space()->expand_into(young_gen->virtual_space(), |
duke@435 | 2293 | absorb_size); |
duke@435 | 2294 | young_gen->reset_after_change(); |
duke@435 | 2295 | old_space->set_top(new_top); |
duke@435 | 2296 | old_space->set_end(new_top); |
duke@435 | 2297 | old_gen->reset_after_change(); |
duke@435 | 2298 | |
duke@435 | 2299 | // Update the object start array for the filler object and the data from eden. |
duke@435 | 2300 | ObjectStartArray* const start_array = old_gen->start_array(); |
duke@435 | 2301 | HeapWord* const start = old_gen_unused.start(); |
duke@435 | 2302 | for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) { |
duke@435 | 2303 | start_array->allocate_block(addr); |
duke@435 | 2304 | } |
duke@435 | 2305 | |
duke@435 | 2306 | // Could update the promoted average here, but it is not typically updated at |
duke@435 | 2307 | // full GCs and the value to use is unclear. Something like |
duke@435 | 2308 | // |
duke@435 | 2309 | // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc. |
duke@435 | 2310 | |
duke@435 | 2311 | size_policy->set_bytes_absorbed_from_eden(absorb_size); |
duke@435 | 2312 | return true; |
duke@435 | 2313 | } |
duke@435 | 2314 | |
duke@435 | 2315 | GCTaskManager* const PSParallelCompact::gc_task_manager() { |
duke@435 | 2316 | assert(ParallelScavengeHeap::gc_task_manager() != NULL, |
duke@435 | 2317 | "shouldn't return NULL"); |
duke@435 | 2318 | return ParallelScavengeHeap::gc_task_manager(); |
duke@435 | 2319 | } |
duke@435 | 2320 | |
duke@435 | 2321 | void PSParallelCompact::marking_phase(ParCompactionManager* cm, |
duke@435 | 2322 | bool maximum_heap_compaction) { |
duke@435 | 2323 | // Recursively traverse all live objects and mark them |
duke@435 | 2324 | EventMark m("1 mark object"); |
duke@435 | 2325 | TraceTime tm("marking phase", print_phases(), true, gclog_or_tty); |
duke@435 | 2326 | |
duke@435 | 2327 | ParallelScavengeHeap* heap = gc_heap(); |
duke@435 | 2328 | uint parallel_gc_threads = heap->gc_task_manager()->workers(); |
duke@435 | 2329 | TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); |
duke@435 | 2330 | ParallelTaskTerminator terminator(parallel_gc_threads, qset); |
duke@435 | 2331 | |
duke@435 | 2332 | PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); |
duke@435 | 2333 | PSParallelCompact::FollowStackClosure follow_stack_closure(cm); |
duke@435 | 2334 | |
duke@435 | 2335 | { |
duke@435 | 2336 | TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty); |
duke@435 | 2337 | |
duke@435 | 2338 | GCTaskQueue* q = GCTaskQueue::create(); |
duke@435 | 2339 | |
duke@435 | 2340 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe)); |
duke@435 | 2341 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles)); |
duke@435 | 2342 | // We scan the thread roots in parallel |
duke@435 | 2343 | Threads::create_thread_roots_marking_tasks(q); |
duke@435 | 2344 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer)); |
duke@435 | 2345 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler)); |
duke@435 | 2346 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management)); |
duke@435 | 2347 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary)); |
duke@435 | 2348 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti)); |
duke@435 | 2349 | q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols)); |
duke@435 | 2350 | |
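// Once a thread exhausts its own marking stack it steals work from other
// threads' stacks; the ParallelTaskTerminator detects when all stacks are
// empty and terminates the stealing tasks.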
duke@435 | 2351 | if (parallel_gc_threads > 1) { |
duke@435 | 2352 | for (uint j = 0; j < parallel_gc_threads; j++) { |
duke@435 | 2353 | q->enqueue(new StealMarkingTask(&terminator)); |
duke@435 | 2354 | } |
duke@435 | 2355 | } |
duke@435 | 2356 | |
duke@435 | 2357 | WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); |
duke@435 | 2358 | q->enqueue(fin); |
duke@435 | 2359 | |
duke@435 | 2360 | gc_task_manager()->add_list(q); |
duke@435 | 2361 | |
duke@435 | 2362 | fin->wait_for(); |
duke@435 | 2363 | |
duke@435 | 2364 | // We have to release the barrier tasks! |
duke@435 | 2365 | WaitForBarrierGCTask::destroy(fin); |
duke@435 | 2366 | } |
duke@435 | 2367 | |
duke@435 | 2368 | // Process reference objects found during marking |
duke@435 | 2369 | { |
duke@435 | 2370 | TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); |
duke@435 | 2371 | ReferencePolicy *soft_ref_policy; |
duke@435 | 2372 | if (maximum_heap_compaction) { |
duke@435 | 2373 | soft_ref_policy = new AlwaysClearPolicy(); |
duke@435 | 2374 | } else { |
duke@435 | 2375 | #ifdef COMPILER2 |
duke@435 | 2376 | soft_ref_policy = new LRUMaxHeapPolicy(); |
duke@435 | 2377 | #else |
duke@435 | 2378 | soft_ref_policy = new LRUCurrentHeapPolicy(); |
duke@435 | 2379 | #endif // COMPILER2 |
duke@435 | 2380 | } |
duke@435 | 2381 | assert(soft_ref_policy != NULL, "No soft reference policy"); |
duke@435 | 2382 | if (ref_processor()->processing_is_mt()) { |
duke@435 | 2383 | RefProcTaskExecutor task_executor; |
duke@435 | 2384 | ref_processor()->process_discovered_references( |
duke@435 | 2385 | soft_ref_policy, is_alive_closure(), &mark_and_push_closure, |
duke@435 | 2386 | &follow_stack_closure, &task_executor); |
duke@435 | 2387 | } else { |
duke@435 | 2388 | ref_processor()->process_discovered_references( |
duke@435 | 2389 | soft_ref_policy, is_alive_closure(), &mark_and_push_closure, |
duke@435 | 2390 | &follow_stack_closure, NULL); |
duke@435 | 2391 | } |
duke@435 | 2392 | } |
duke@435 | 2393 | |
duke@435 | 2394 | TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); |
duke@435 | 2395 | // Follow system dictionary roots and unload classes. |
duke@435 | 2396 | bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); |
duke@435 | 2397 | |
duke@435 | 2398 | // Follow code cache roots. |
duke@435 | 2399 | CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure, |
duke@435 | 2400 | purged_class); |
duke@435 | 2401 | follow_stack(cm); // Flush marking stack. |
duke@435 | 2402 | |
duke@435 | 2403 | // Update subklass/sibling/implementor links of live klasses |
duke@435 | 2404 | // revisit_klass_stack is used in follow_weak_klass_links(). |
duke@435 | 2405 | follow_weak_klass_links(cm); |
duke@435 | 2406 | |
duke@435 | 2407 | // Visit symbol and interned string tables and delete unmarked oops |
duke@435 | 2408 | SymbolTable::unlink(is_alive_closure()); |
duke@435 | 2409 | StringTable::unlink(is_alive_closure()); |
duke@435 | 2410 | |
duke@435 | 2411 | assert(cm->marking_stack()->size() == 0, "stack should be empty by now"); |
duke@435 | 2412 | assert(cm->overflow_stack()->is_empty(), "stack should be empty by now"); |
duke@435 | 2413 | } |
duke@435 | 2414 | |
duke@435 | 2415 | // This should be moved to the shared markSweep code! |
duke@435 | 2416 | class PSAlwaysTrueClosure: public BoolObjectClosure { |
duke@435 | 2417 | public: |
duke@435 | 2418 | void do_object(oop p) { ShouldNotReachHere(); } |
duke@435 | 2419 | bool do_object_b(oop p) { return true; } |
duke@435 | 2420 | }; |
duke@435 | 2421 | static PSAlwaysTrueClosure always_true; |
duke@435 | 2422 | |
duke@435 | 2423 | void PSParallelCompact::adjust_roots() { |
duke@435 | 2424 | // Adjust the pointers to reflect the new locations |
duke@435 | 2425 | EventMark m("3 adjust roots"); |
duke@435 | 2426 | TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty); |
duke@435 | 2427 | |
duke@435 | 2428 | // General strong roots. |
duke@435 | 2429 | Universe::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2430 | ReferenceProcessor::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2431 | JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles |
duke@435 | 2432 | Threads::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2433 | ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2434 | FlatProfiler::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2435 | Management::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2436 | JvmtiExport::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2437 | // SO_AllClasses |
duke@435 | 2438 | SystemDictionary::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2439 | vmSymbols::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2440 | |
duke@435 | 2441 | // Now adjust pointers in remaining weak roots. (All of which should |
duke@435 | 2442 | // have been cleared if they pointed to non-surviving objects.) |
duke@435 | 2443 | // Global (weak) JNI handles |
duke@435 | 2444 | JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); |
duke@435 | 2445 | |
duke@435 | 2446 | CodeCache::oops_do(adjust_pointer_closure()); |
duke@435 | 2447 | SymbolTable::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2448 | StringTable::oops_do(adjust_root_pointer_closure()); |
duke@435 | 2449 | ref_processor()->weak_oops_do(adjust_root_pointer_closure()); |
duke@435 | 2450 | // Roots were visited so references into the young gen in roots |
duke@435 | 2451 | // may have been scanned. Process them also. |
duke@435 | 2452 | // Should the reference processor have a span that excludes |
duke@435 | 2453 | // young gen objects? |
duke@435 | 2454 | PSScavenge::reference_processor()->weak_oops_do( |
duke@435 | 2455 | adjust_root_pointer_closure()); |
duke@435 | 2456 | } |
duke@435 | 2457 | |
duke@435 | 2458 | void PSParallelCompact::compact_perm(ParCompactionManager* cm) { |
duke@435 | 2459 | EventMark m("4 compact perm"); |
duke@435 | 2460 | TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty); |
duke@435 | 2461 | // trace("4"); |
duke@435 | 2462 | |
duke@435 | 2463 | gc_heap()->perm_gen()->start_array()->reset(); |
duke@435 | 2464 | move_and_update(cm, perm_space_id); |
duke@435 | 2465 | } |
duke@435 | 2466 | |
duke@435 | 2467 | void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q, |
duke@435 | 2468 | uint parallel_gc_threads) { |
duke@435 | 2469 | TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); |
duke@435 | 2470 | |
duke@435 | 2471 | const unsigned int task_count = MAX2(parallel_gc_threads, 1U); |
duke@435 | 2472 | for (unsigned int j = 0; j < task_count; j++) { |
duke@435 | 2473 | q->enqueue(new DrainStacksCompactionTask()); |
duke@435 | 2474 | } |
duke@435 | 2475 | |
duke@435 | 2476 | // Find all chunks that are available (can be filled immediately) and |
duke@435 | 2477 | // distribute them to the thread stacks. The iteration is done in reverse |
duke@435 | 2478 | // order (high to low) so the chunks will be popped from the LIFO stacks in ascending order. |
duke@435 | 2479 | |
duke@435 | 2480 | const ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 2481 | |
duke@435 | 2482 | size_t fillable_chunks = 0; // A count for diagnostic purposes. |
duke@435 | 2483 | unsigned int which = 0; // The worker thread number. |
duke@435 | 2484 | |
duke@435 | 2485 | for (unsigned int id = to_space_id; id > perm_space_id; --id) { |
duke@435 | 2486 | SpaceInfo* const space_info = _space_info + id; |
duke@435 | 2487 | MutableSpace* const space = space_info->space(); |
duke@435 | 2488 | HeapWord* const new_top = space_info->new_top(); |
duke@435 | 2489 | |
duke@435 | 2490 | const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix()); |
duke@435 | 2491 | const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top)); |
duke@435 | 2492 | assert(end_chunk > 0, "perm gen cannot be empty"); |
duke@435 | 2493 | |
duke@435 | 2494 | for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) { |
duke@435 | 2495 | if (sd.chunk(cur)->claim_unsafe()) { |
duke@435 | 2496 | ParCompactionManager* cm = ParCompactionManager::manager_array(which); |
duke@435 | 2497 | cm->save_for_processing(cur); |
duke@435 | 2498 | |
duke@435 | 2499 | if (TraceParallelOldGCCompactionPhase && Verbose) { |
duke@435 | 2500 | const size_t count_mod_8 = fillable_chunks & 7; |
duke@435 | 2501 | if (count_mod_8 == 0) gclog_or_tty->print("fillable: "); |
duke@435 | 2502 | gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur); |
duke@435 | 2503 | if (count_mod_8 == 7) gclog_or_tty->cr(); |
duke@435 | 2504 | } |
duke@435 | 2505 | |
duke@435 | 2506 | NOT_PRODUCT(++fillable_chunks;) |
duke@435 | 2507 | |
duke@435 | 2508 | // Assign chunks to threads in round-robin fashion. |
duke@435 | 2509 | if (++which == task_count) { |
duke@435 | 2510 | which = 0; |
duke@435 | 2511 | } |
duke@435 | 2512 | } |
duke@435 | 2513 | } |
duke@435 | 2514 | } |
duke@435 | 2515 | |
duke@435 | 2516 | if (TraceParallelOldGCCompactionPhase) { |
duke@435 | 2517 | if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr(); |
duke@435 | 2518 | gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable chunks", fillable_chunks); |
duke@435 | 2519 | } |
duke@435 | 2520 | } |
duke@435 | 2521 | |
duke@435 | 2522 | #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4 |
duke@435 | 2523 | |
duke@435 | 2524 | void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, |
duke@435 | 2525 | uint parallel_gc_threads) { |
duke@435 | 2526 | TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty); |
duke@435 | 2527 | |
duke@435 | 2528 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 2529 | |
duke@435 | 2530 | // Iterate over all the spaces adding tasks for updating |
duke@435 | 2531 | // chunks in the dense prefix. Assume that 1 gc thread |
duke@435 | 2532 | // will work on opening the gaps and the remaining gc threads |
duke@435 | 2533 | // will work on the dense prefix. |
duke@435 | 2534 | SpaceId space_id = old_space_id; |
duke@435 | 2535 | while (space_id != last_space_id) { |
duke@435 | 2536 | HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix(); |
duke@435 | 2537 | const MutableSpace* const space = _space_info[space_id].space(); |
duke@435 | 2538 | |
duke@435 | 2539 | if (dense_prefix_end == space->bottom()) { |
duke@435 | 2540 | // There is no dense prefix for this space. |
duke@435 | 2541 | space_id = next_compaction_space_id(space_id); |
duke@435 | 2542 | continue; |
duke@435 | 2543 | } |
duke@435 | 2544 | |
duke@435 | 2545 | // The dense prefix is before this chunk. |
duke@435 | 2546 | size_t chunk_index_end_dense_prefix = |
duke@435 | 2547 | sd.addr_to_chunk_idx(dense_prefix_end); |
duke@435 | 2548 | ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix); |
duke@435 | 2549 | assert(dense_prefix_end == space->end() || |
duke@435 | 2550 | dense_prefix_cp->available() || |
duke@435 | 2551 | dense_prefix_cp->claimed(), |
duke@435 | 2552 | "The chunk after the dense prefix should always be ready to fill"); |
duke@435 | 2553 | |
duke@435 | 2554 | size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom()); |
duke@435 | 2555 | |
duke@435 | 2556 | // Is there dense prefix work? |
duke@435 | 2557 | size_t total_dense_prefix_chunks = |
duke@435 | 2558 | chunk_index_end_dense_prefix - chunk_index_start; |
duke@435 | 2559 | // How many chunks of the dense prefix should be given to |
duke@435 | 2560 | // each thread? |
duke@435 | 2561 | if (total_dense_prefix_chunks > 0) { |
duke@435 | 2562 | uint tasks_for_dense_prefix = 1; |
duke@435 | 2563 | if (UseParallelDensePrefixUpdate) { |
duke@435 | 2564 | if (total_dense_prefix_chunks <= |
duke@435 | 2565 | (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { |
duke@435 | 2566 | // Don't over partition. This assumes that |
duke@435 | 2567 | // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value |
duke@435 | 2568 | // so there are not many chunks to process. |
duke@435 | 2569 | tasks_for_dense_prefix = parallel_gc_threads; |
duke@435 | 2570 | } else { |
duke@435 | 2571 | // Over partition |
duke@435 | 2572 | tasks_for_dense_prefix = parallel_gc_threads * |
duke@435 | 2573 | PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; |
duke@435 | 2574 | } |
duke@435 | 2575 | } |
duke@435 | 2576 | size_t chunks_per_thread = total_dense_prefix_chunks / |
duke@435 | 2577 | tasks_for_dense_prefix; |
duke@435 | 2578 | // Give each thread at least 1 chunk. |
duke@435 | 2579 | if (chunks_per_thread == 0) { |
duke@435 | 2580 | chunks_per_thread = 1; |
duke@435 | 2581 | } |
duke@435 | 2582 | |
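// Illustrative example (hypothetical numbers): 10 dense prefix chunks and
// 4 gc threads give tasks_for_dense_prefix = 4 (since 10 <= 4 * 4) and
// chunks_per_thread = 10 / 4 = 2; the loop below enqueues 4 tasks covering
// 8 chunks, and the leftover 2 chunks go to the catch-all task enqueued
// after it.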
duke@435 | 2583 | for (uint k = 0; k < tasks_for_dense_prefix; k++) { |
duke@435 | 2584 | if (chunk_index_start >= chunk_index_end_dense_prefix) { |
duke@435 | 2585 | break; |
duke@435 | 2586 | } |
duke@435 | 2587 | // chunk_index_end is not processed |
duke@435 | 2588 | size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread, |
duke@435 | 2589 | chunk_index_end_dense_prefix); |
duke@435 | 2590 | q->enqueue(new UpdateDensePrefixTask( |
duke@435 | 2591 | space_id, |
duke@435 | 2592 | chunk_index_start, |
duke@435 | 2593 | chunk_index_end)); |
duke@435 | 2594 | chunk_index_start = chunk_index_end; |
duke@435 | 2595 | } |
duke@435 | 2596 | } |
duke@435 | 2597 | // This gets any part of the dense prefix that did not |
duke@435 | 2598 | // fit evenly. |
duke@435 | 2599 | if (chunk_index_start < chunk_index_end_dense_prefix) { |
duke@435 | 2600 | q->enqueue(new UpdateDensePrefixTask( |
duke@435 | 2601 | space_id, |
duke@435 | 2602 | chunk_index_start, |
duke@435 | 2603 | chunk_index_end_dense_prefix)); |
duke@435 | 2604 | } |
duke@435 | 2605 | space_id = next_compaction_space_id(space_id); |
duke@435 | 2606 | } // End tasks for dense prefix |
duke@435 | 2607 | } |
duke@435 | 2608 | |
duke@435 | 2609 | void PSParallelCompact::enqueue_chunk_stealing_tasks( |
duke@435 | 2610 | GCTaskQueue* q, |
duke@435 | 2611 | ParallelTaskTerminator* terminator_ptr, |
duke@435 | 2612 | uint parallel_gc_threads) { |
duke@435 | 2613 | TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); |
duke@435 | 2614 | |
duke@435 | 2615 | // Once a thread has drained its stack, it should try to steal chunks from |
duke@435 | 2616 | // other threads. |
duke@435 | 2617 | if (parallel_gc_threads > 1) { |
duke@435 | 2618 | for (uint j = 0; j < parallel_gc_threads; j++) { |
duke@435 | 2619 | q->enqueue(new StealChunkCompactionTask(terminator_ptr)); |
duke@435 | 2620 | } |
duke@435 | 2621 | } |
duke@435 | 2622 | } |
duke@435 | 2623 | |
duke@435 | 2624 | void PSParallelCompact::compact() { |
duke@435 | 2625 | EventMark m("5 compact"); |
duke@435 | 2626 | // trace("5"); |
duke@435 | 2627 | TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty); |
duke@435 | 2628 | |
duke@435 | 2629 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 2630 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 2631 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 2632 | old_gen->start_array()->reset(); |
duke@435 | 2633 | uint parallel_gc_threads = heap->gc_task_manager()->workers(); |
duke@435 | 2634 | TaskQueueSetSuper* qset = ParCompactionManager::chunk_array(); |
duke@435 | 2635 | ParallelTaskTerminator terminator(parallel_gc_threads, qset); |
duke@435 | 2636 | |
duke@435 | 2637 | GCTaskQueue* q = GCTaskQueue::create(); |
duke@435 | 2638 | enqueue_chunk_draining_tasks(q, parallel_gc_threads); |
duke@435 | 2639 | enqueue_dense_prefix_tasks(q, parallel_gc_threads); |
duke@435 | 2640 | enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads); |
duke@435 | 2641 | |
duke@435 | 2642 | { |
duke@435 | 2643 | TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); |
duke@435 | 2644 | |
duke@435 | 2645 | WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create(); |
duke@435 | 2646 | q->enqueue(fin); |
duke@435 | 2647 | |
duke@435 | 2648 | gc_task_manager()->add_list(q); |
duke@435 | 2649 | |
duke@435 | 2650 | fin->wait_for(); |
duke@435 | 2651 | |
duke@435 | 2652 | // We have to release the barrier tasks! |
duke@435 | 2653 | WaitForBarrierGCTask::destroy(fin); |
duke@435 | 2654 | |
duke@435 | 2655 | #ifdef ASSERT |
duke@435 | 2656 | // Verify that all chunks have been processed before the deferred updates. |
duke@435 | 2657 | // Note that perm_space_id is skipped; this type of verification is not |
duke@435 | 2658 | // valid until the perm gen is compacted by chunks. |
duke@435 | 2659 | for (unsigned int id = old_space_id; id < last_space_id; ++id) { |
duke@435 | 2660 | verify_complete(SpaceId(id)); |
duke@435 | 2661 | } |
duke@435 | 2662 | #endif |
duke@435 | 2663 | } |
duke@435 | 2664 | |
duke@435 | 2665 | { |
duke@435 | 2666 | // Update the deferred objects, if any. Any compaction manager can be used. |
duke@435 | 2667 | TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty); |
duke@435 | 2668 | ParCompactionManager* cm = ParCompactionManager::manager_array(0); |
duke@435 | 2669 | for (unsigned int id = old_space_id; id < last_space_id; ++id) { |
duke@435 | 2670 | update_deferred_objects(cm, SpaceId(id)); |
duke@435 | 2671 | } |
duke@435 | 2672 | } |
duke@435 | 2673 | } |
duke@435 | 2674 | |
duke@435 | 2675 | #ifdef ASSERT |
duke@435 | 2676 | void PSParallelCompact::verify_complete(SpaceId space_id) { |
duke@435 | 2677 | // All Chunks between space bottom() to new_top() should be marked as filled |
duke@435 | 2678 | // and all Chunks between new_top() and top() should be available (i.e., |
duke@435 | 2679 | // should have been emptied). |
duke@435 | 2680 | ParallelCompactData& sd = summary_data(); |
duke@435 | 2681 | SpaceInfo si = _space_info[space_id]; |
duke@435 | 2682 | HeapWord* new_top_addr = sd.chunk_align_up(si.new_top()); |
duke@435 | 2683 | HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top()); |
duke@435 | 2684 | const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom()); |
duke@435 | 2685 | const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr); |
duke@435 | 2686 | const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr); |
duke@435 | 2687 | |
duke@435 | 2688 | bool issued_a_warning = false; |
duke@435 | 2689 | |
duke@435 | 2690 | size_t cur_chunk; |
duke@435 | 2691 | for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) { |
duke@435 | 2692 | const ChunkData* const c = sd.chunk(cur_chunk); |
duke@435 | 2693 | if (!c->completed()) { |
duke@435 | 2694 | warning("chunk " SIZE_FORMAT " not filled: " |
duke@435 | 2695 | "destination_count=" SIZE_FORMAT, |
duke@435 | 2696 | cur_chunk, c->destination_count()); |
duke@435 | 2697 | issued_a_warning = true; |
duke@435 | 2698 | } |
duke@435 | 2699 | } |
duke@435 | 2700 | |
duke@435 | 2701 | for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) { |
duke@435 | 2702 | const ChunkData* const c = sd.chunk(cur_chunk); |
duke@435 | 2703 | if (!c->available()) { |
duke@435 | 2704 | warning("chunk " SIZE_FORMAT " not empty: " |
duke@435 | 2705 | "destination_count=" SIZE_FORMAT, |
duke@435 | 2706 | cur_chunk, c->destination_count()); |
duke@435 | 2707 | issued_a_warning = true; |
duke@435 | 2708 | } |
duke@435 | 2709 | } |
duke@435 | 2710 | |
duke@435 | 2711 | if (issued_a_warning) { |
duke@435 | 2712 | print_chunk_ranges(); |
duke@435 | 2713 | } |
duke@435 | 2714 | } |
duke@435 | 2715 | #endif // #ifdef ASSERT |
duke@435 | 2716 | |
duke@435 | 2717 | void PSParallelCompact::compact_serial(ParCompactionManager* cm) { |
duke@435 | 2718 | EventMark m("5 compact serial"); |
duke@435 | 2719 | TraceTime tm("compact serial", print_phases(), true, gclog_or_tty); |
duke@435 | 2720 | |
duke@435 | 2721 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 2722 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 2723 | |
duke@435 | 2724 | PSYoungGen* young_gen = heap->young_gen(); |
duke@435 | 2725 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 2726 | |
duke@435 | 2727 | old_gen->start_array()->reset(); |
duke@435 | 2728 | old_gen->move_and_update(cm); |
duke@435 | 2729 | young_gen->move_and_update(cm); |
duke@435 | 2730 | } |
duke@435 | 2731 | |
duke@435 | 2732 | |
duke@435 | 2733 | void PSParallelCompact::follow_stack(ParCompactionManager* cm) { |
duke@435 | 2734 | while (!cm->overflow_stack()->is_empty()) { |
duke@435 | 2735 | oop obj = cm->overflow_stack()->pop(); |
duke@435 | 2736 | obj->follow_contents(cm); |
duke@435 | 2737 | } |
duke@435 | 2738 | |
duke@435 | 2739 | oop obj; |
duke@435 | 2740 | // obj is a reference!!! |
duke@435 | 2741 | while (cm->marking_stack()->pop_local(obj)) { |
duke@435 | 2742 | // It would be nice to assert about the type of objects we might |
duke@435 | 2743 | // pop, but they can come from anywhere, unfortunately. |
duke@435 | 2744 | obj->follow_contents(cm); |
duke@435 | 2745 | } |
duke@435 | 2746 | } |
duke@435 | 2747 | |
duke@435 | 2748 | void |
duke@435 | 2749 | PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) { |
duke@435 | 2750 | // All klasses on the revisit stack are marked at this point. |
duke@435 | 2751 | // Update and follow all subklass, sibling and implementor links. |
duke@435 | 2752 | for (uint i = 0; i < ParallelGCThreads+1; i++) { |
duke@435 | 2753 | ParCompactionManager* cm = ParCompactionManager::manager_array(i); |
duke@435 | 2754 | KeepAliveClosure keep_alive_closure(cm); |
duke@435 | 2755 | for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) { |
duke@435 | 2756 | cm->revisit_klass_stack()->at(j)->follow_weak_klass_links( |
duke@435 | 2757 | is_alive_closure(), |
duke@435 | 2758 | &keep_alive_closure); |
duke@435 | 2759 | } |
duke@435 | 2760 | follow_stack(cm); |
duke@435 | 2761 | } |
duke@435 | 2762 | } |
duke@435 | 2763 | |
duke@435 | 2764 | void |
duke@435 | 2765 | PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) { |
duke@435 | 2766 | cm->revisit_klass_stack()->push(k); |
duke@435 | 2767 | } |
duke@435 | 2768 | |
duke@435 | 2769 | #ifdef VALIDATE_MARK_SWEEP |
duke@435 | 2770 | |
coleenp@548 | 2771 | void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) { |
duke@435 | 2772 | if (!ValidateMarkSweep) |
duke@435 | 2773 | return; |
duke@435 | 2774 | |
duke@435 | 2775 | if (!isroot) { |
duke@435 | 2776 | if (_pointer_tracking) { |
duke@435 | 2777 | guarantee(_adjusted_pointers->contains(p), "should have seen this pointer"); |
duke@435 | 2778 | _adjusted_pointers->remove(p); |
duke@435 | 2779 | } |
duke@435 | 2780 | } else { |
duke@435 | 2781 | ptrdiff_t index = _root_refs_stack->find(p); |
duke@435 | 2782 | if (index != -1) { |
duke@435 | 2783 | int l = _root_refs_stack->length(); |
duke@435 | 2784 | if (l > 0 && l - 1 != index) { |
coleenp@548 | 2785 | void* last = _root_refs_stack->pop(); |
duke@435 | 2786 | assert(last != p, "should be different"); |
duke@435 | 2787 | _root_refs_stack->at_put(index, last); |
duke@435 | 2788 | } else { |
duke@435 | 2789 | _root_refs_stack->remove(p); |
duke@435 | 2790 | } |
duke@435 | 2791 | } |
duke@435 | 2792 | } |
duke@435 | 2793 | } |
duke@435 | 2794 | |
duke@435 | 2795 | |
coleenp@548 | 2796 | void PSParallelCompact::check_adjust_pointer(void* p) { |
duke@435 | 2797 | _adjusted_pointers->push(p); |
duke@435 | 2798 | } |
duke@435 | 2799 | |
duke@435 | 2800 | |
duke@435 | 2801 | class AdjusterTracker: public OopClosure { |
duke@435 | 2802 | public: |
duke@435 | 2803 | AdjusterTracker() {} |
coleenp@548 | 2804 | void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); } |
coleenp@548 | 2805 | void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); } |
duke@435 | 2806 | }; |
duke@435 | 2807 | |
duke@435 | 2808 | |
duke@435 | 2809 | void PSParallelCompact::track_interior_pointers(oop obj) { |
duke@435 | 2810 | if (ValidateMarkSweep) { |
duke@435 | 2811 | _adjusted_pointers->clear(); |
duke@435 | 2812 | _pointer_tracking = true; |
duke@435 | 2813 | |
duke@435 | 2814 | AdjusterTracker checker; |
duke@435 | 2815 | obj->oop_iterate(&checker); |
duke@435 | 2816 | } |
duke@435 | 2817 | } |
duke@435 | 2818 | |
duke@435 | 2819 | |
duke@435 | 2820 | void PSParallelCompact::check_interior_pointers() { |
duke@435 | 2821 | if (ValidateMarkSweep) { |
duke@435 | 2822 | _pointer_tracking = false; |
duke@435 | 2823 | guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers"); |
duke@435 | 2824 | } |
duke@435 | 2825 | } |
duke@435 | 2826 | |
duke@435 | 2827 | |
duke@435 | 2828 | void PSParallelCompact::reset_live_oop_tracking(bool at_perm) { |
duke@435 | 2829 | if (ValidateMarkSweep) { |
duke@435 | 2830 | guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops"); |
duke@435 | 2831 | _live_oops_index = at_perm ? _live_oops_index_at_perm : 0; |
duke@435 | 2832 | } |
duke@435 | 2833 | } |
duke@435 | 2834 | |
duke@435 | 2835 | |
duke@435 | 2836 | void PSParallelCompact::register_live_oop(oop p, size_t size) { |
duke@435 | 2837 | if (ValidateMarkSweep) { |
duke@435 | 2838 | _live_oops->push(p); |
duke@435 | 2839 | _live_oops_size->push(size); |
duke@435 | 2840 | _live_oops_index++; |
duke@435 | 2841 | } |
duke@435 | 2842 | } |
duke@435 | 2843 | |
duke@435 | 2844 | void PSParallelCompact::validate_live_oop(oop p, size_t size) { |
duke@435 | 2845 | if (ValidateMarkSweep) { |
duke@435 | 2846 | oop obj = _live_oops->at((int)_live_oops_index); |
duke@435 | 2847 | guarantee(obj == p, "should be the same object"); |
duke@435 | 2848 | guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size"); |
duke@435 | 2849 | _live_oops_index++; |
duke@435 | 2850 | } |
duke@435 | 2851 | } |
duke@435 | 2852 | |
duke@435 | 2853 | void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size, |
duke@435 | 2854 | HeapWord* compaction_top) { |
duke@435 | 2855 | assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top), |
duke@435 | 2856 | "should be moved to forwarded location"); |
duke@435 | 2857 | if (ValidateMarkSweep) { |
duke@435 | 2858 | PSParallelCompact::validate_live_oop(oop(q), size); |
duke@435 | 2859 | _live_oops_moved_to->push(oop(compaction_top)); |
duke@435 | 2860 | } |
duke@435 | 2861 | if (RecordMarkSweepCompaction) { |
duke@435 | 2862 | _cur_gc_live_oops->push(q); |
duke@435 | 2863 | _cur_gc_live_oops_moved_to->push(compaction_top); |
duke@435 | 2864 | _cur_gc_live_oops_size->push(size); |
duke@435 | 2865 | } |
duke@435 | 2866 | } |
duke@435 | 2867 | |
duke@435 | 2868 | |
duke@435 | 2869 | void PSParallelCompact::compaction_complete() { |
duke@435 | 2870 | if (RecordMarkSweepCompaction) { |
duke@435 | 2871 | GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops; |
duke@435 | 2872 | GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to; |
duke@435 | 2873 | GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size; |
duke@435 | 2874 | |
duke@435 | 2875 | _cur_gc_live_oops = _last_gc_live_oops; |
duke@435 | 2876 | _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to; |
duke@435 | 2877 | _cur_gc_live_oops_size = _last_gc_live_oops_size; |
duke@435 | 2878 | _last_gc_live_oops = _tmp_live_oops; |
duke@435 | 2879 | _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to; |
duke@435 | 2880 | _last_gc_live_oops_size = _tmp_live_oops_size; |
duke@435 | 2881 | } |
duke@435 | 2882 | } |
duke@435 | 2883 | |
duke@435 | 2884 | |
duke@435 | 2885 | void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) { |
duke@435 | 2886 | if (!RecordMarkSweepCompaction) { |
duke@435 | 2887 | tty->print_cr("Requires RecordMarkSweepCompaction to be enabled"); |
duke@435 | 2888 | return; |
duke@435 | 2889 | } |
duke@435 | 2890 | |
duke@435 | 2891 | if (_last_gc_live_oops == NULL) { |
duke@435 | 2892 | tty->print_cr("No compaction information gathered yet"); |
duke@435 | 2893 | return; |
duke@435 | 2894 | } |
duke@435 | 2895 | |
duke@435 | 2896 | for (int i = 0; i < _last_gc_live_oops->length(); i++) { |
duke@435 | 2897 | HeapWord* old_oop = _last_gc_live_oops->at(i); |
duke@435 | 2898 | size_t sz = _last_gc_live_oops_size->at(i); |
duke@435 | 2899 | if (old_oop <= q && q < (old_oop + sz)) { |
duke@435 | 2900 | HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i); |
duke@435 | 2901 | size_t offset = (q - old_oop); |
duke@435 | 2902 | tty->print_cr("Address " PTR_FORMAT, q); |
duke@435 | 2903 | tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset); |
duke@435 | 2904 | tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset); |
duke@435 | 2905 | return; |
duke@435 | 2906 | } |
duke@435 | 2907 | } |
duke@435 | 2908 | |
duke@435 | 2909 | tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q); |
duke@435 | 2910 | } |
duke@435 | 2911 | #endif //VALIDATE_MARK_SWEEP |
duke@435 | 2912 | |
duke@435 | 2913 | // Update interior oops in the range of chunks [beg_chunk, end_chunk). |
duke@435 | 2914 | void |
duke@435 | 2915 | PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm, |
duke@435 | 2916 | SpaceId space_id, |
duke@435 | 2917 | size_t beg_chunk, |
duke@435 | 2918 | size_t end_chunk) { |
duke@435 | 2919 | ParallelCompactData& sd = summary_data(); |
duke@435 | 2920 | ParMarkBitMap* const mbm = mark_bitmap(); |
duke@435 | 2921 | |
duke@435 | 2922 | HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk); |
duke@435 | 2923 | HeapWord* const end_addr = sd.chunk_to_addr(end_chunk); |
duke@435 | 2924 | assert(beg_chunk <= end_chunk, "bad chunk range"); |
duke@435 | 2925 | assert(end_addr <= dense_prefix(space_id), "not in the dense prefix"); |
duke@435 | 2926 | |
duke@435 | 2927 | #ifdef ASSERT |
duke@435 | 2928 | // Claim the chunks to avoid triggering an assert when they are marked as |
duke@435 | 2929 | // filled. |
duke@435 | 2930 | for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) { |
duke@435 | 2931 | assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed"); |
duke@435 | 2932 | } |
duke@435 | 2933 | #endif // #ifdef ASSERT |
duke@435 | 2934 | |
duke@435 | 2935 | if (beg_addr != space(space_id)->bottom()) { |
duke@435 | 2936 | // Find the first live object or block of dead space that *starts* in this |
duke@435 | 2937 | // range of chunks. If a partial object crosses onto the chunk, skip it; it |
duke@435 | 2938 | // will be marked for 'deferred update' when the object head is processed. |
duke@435 | 2939 | // If dead space crosses onto the chunk, it is also skipped; it will be |
duke@435 | 2940 | // filled when the prior chunk is processed. If neither of those apply, the |
duke@435 | 2941 | // first word in the chunk is the start of a live object or dead space. |
duke@435 | 2942 | assert(beg_addr > space(space_id)->bottom(), "sanity"); |
duke@435 | 2943 | const ChunkData* const cp = sd.chunk(beg_chunk); |
duke@435 | 2944 | if (cp->partial_obj_size() != 0) { |
duke@435 | 2945 | beg_addr = sd.partial_obj_end(beg_chunk); |
duke@435 | 2946 | } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) { |
duke@435 | 2947 | beg_addr = mbm->find_obj_beg(beg_addr, end_addr); |
duke@435 | 2948 | } |
duke@435 | 2949 | } |
duke@435 | 2950 | |
duke@435 | 2951 | if (beg_addr < end_addr) { |
duke@435 | 2952 | // A live object or block of dead space starts in this range of Chunks. |
duke@435 | 2953 | HeapWord* const dense_prefix_end = dense_prefix(space_id); |
duke@435 | 2954 | |
duke@435 | 2955 | // Create closures and iterate. |
duke@435 | 2956 | UpdateOnlyClosure update_closure(mbm, cm, space_id); |
duke@435 | 2957 | FillClosure fill_closure(cm, space_id); |
duke@435 | 2958 | ParMarkBitMap::IterationStatus status; |
duke@435 | 2959 | status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr, |
duke@435 | 2960 | dense_prefix_end); |
duke@435 | 2961 | if (status == ParMarkBitMap::incomplete) { |
duke@435 | 2962 | update_closure.do_addr(update_closure.source()); |
duke@435 | 2963 | } |
duke@435 | 2964 | } |
duke@435 | 2965 | |
duke@435 | 2966 | // Mark the chunks as filled. |
duke@435 | 2967 | ChunkData* const beg_cp = sd.chunk(beg_chunk); |
duke@435 | 2968 | ChunkData* const end_cp = sd.chunk(end_chunk); |
duke@435 | 2969 | for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) { |
duke@435 | 2970 | cp->set_completed(); |
duke@435 | 2971 | } |
duke@435 | 2972 | } |
duke@435 | 2973 | |
duke@435 | 2974 | // Return the SpaceId for the space containing addr. If addr is not in the |
duke@435 | 2975 | // heap, last_space_id is returned. In debug mode it expects the address to be |
duke@435 | 2976 | // in the heap and asserts such. |
duke@435 | 2977 | PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) { |
duke@435 | 2978 | assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap"); |
duke@435 | 2979 | |
duke@435 | 2980 | for (unsigned int id = perm_space_id; id < last_space_id; ++id) { |
duke@435 | 2981 | if (_space_info[id].space()->contains(addr)) { |
duke@435 | 2982 | return SpaceId(id); |
duke@435 | 2983 | } |
duke@435 | 2984 | } |
duke@435 | 2985 | |
duke@435 | 2986 | assert(false, "no space contains the addr"); |
duke@435 | 2987 | return last_space_id; |
duke@435 | 2988 | } |
duke@435 | 2989 | |
duke@435 | 2990 | void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm, |
duke@435 | 2991 | SpaceId id) { |
duke@435 | 2992 | assert(id < last_space_id, "bad space id"); |
duke@435 | 2993 | |
duke@435 | 2994 | ParallelCompactData& sd = summary_data(); |
duke@435 | 2995 | const SpaceInfo* const space_info = _space_info + id; |
duke@435 | 2996 | ObjectStartArray* const start_array = space_info->start_array(); |
duke@435 | 2997 | |
duke@435 | 2998 | const MutableSpace* const space = space_info->space(); |
duke@435 | 2999 | assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set"); |
duke@435 | 3000 | HeapWord* const beg_addr = space_info->dense_prefix(); |
duke@435 | 3001 | HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top()); |
duke@435 | 3002 | |
duke@435 | 3003 | const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr); |
duke@435 | 3004 | const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr); |
duke@435 | 3005 | const ChunkData* cur_chunk; |
duke@435 | 3006 | for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) { |
duke@435 | 3007 | HeapWord* const addr = cur_chunk->deferred_obj_addr(); |
duke@435 | 3008 | if (addr != NULL) { |
duke@435 | 3009 | if (start_array != NULL) { |
duke@435 | 3010 | start_array->allocate_block(addr); |
duke@435 | 3011 | } |
duke@435 | 3012 | oop(addr)->update_contents(cm); |
duke@435 | 3013 | assert(oop(addr)->is_oop_or_null(), "should be an oop now"); |
duke@435 | 3014 | } |
duke@435 | 3015 | } |
duke@435 | 3016 | } |
duke@435 | 3017 | |
duke@435 | 3018 | // Skip over count live words starting from beg, and return the address of the |
duke@435 | 3019 | // next live word. Unless marked, the word corresponding to beg is assumed to |
duke@435 | 3020 | // be dead. Callers must either ensure beg does not correspond to the middle of |
duke@435 | 3021 | // an object, or account for those live words in some other way. Callers must |
duke@435 | 3022 | // also ensure that there are enough live words in the range [beg, end) to skip. |
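// Illustrative example (hypothetical layout, assuming one bitmap bit per
// heap word): with a 3-word object at beg + 2 and a 2-word object at
// beg + 8, skip_live_words(beg, end, 4) consumes the 3 words of the first
// object plus 1 word of the second and returns beg + 9.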
duke@435 | 3023 | HeapWord* |
duke@435 | 3024 | PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count) |
duke@435 | 3025 | { |
duke@435 | 3026 | assert(count > 0, "sanity"); |
duke@435 | 3027 | |
duke@435 | 3028 | ParMarkBitMap* m = mark_bitmap(); |
duke@435 | 3029 | idx_t bits_to_skip = m->words_to_bits(count); |
duke@435 | 3030 | idx_t cur_beg = m->addr_to_bit(beg); |
duke@435 | 3031 | const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end)); |
duke@435 | 3032 | |
duke@435 | 3033 | do { |
duke@435 | 3034 | cur_beg = m->find_obj_beg(cur_beg, search_end); |
duke@435 | 3035 | idx_t cur_end = m->find_obj_end(cur_beg, search_end); |
duke@435 | 3036 | const size_t obj_bits = cur_end - cur_beg + 1; |
duke@435 | 3037 | if (obj_bits > bits_to_skip) { |
duke@435 | 3038 | return m->bit_to_addr(cur_beg + bits_to_skip); |
duke@435 | 3039 | } |
duke@435 | 3040 | bits_to_skip -= obj_bits; |
duke@435 | 3041 | cur_beg = cur_end + 1; |
duke@435 | 3042 | } while (bits_to_skip > 0); |
duke@435 | 3043 | |
duke@435 | 3044 | // Skipping the desired number of words landed just past the end of an object. |
duke@435 | 3045 | // Find the start of the next object. |
duke@435 | 3046 | cur_beg = m->find_obj_beg(cur_beg, search_end); |
duke@435 | 3047 | assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip"); |
duke@435 | 3048 | return m->bit_to_addr(cur_beg); |
duke@435 | 3049 | } |
duke@435 | 3050 | |
duke@435 | 3051 | HeapWord* |
duke@435 | 3052 | PSParallelCompact::first_src_addr(HeapWord* const dest_addr, |
duke@435 | 3053 | size_t src_chunk_idx) |
duke@435 | 3054 | { |
duke@435 | 3055 | ParMarkBitMap* const bitmap = mark_bitmap(); |
duke@435 | 3056 | const ParallelCompactData& sd = summary_data(); |
duke@435 | 3057 | const size_t ChunkSize = ParallelCompactData::ChunkSize; |
duke@435 | 3058 | |
duke@435 | 3059 | assert(sd.is_chunk_aligned(dest_addr), "not aligned"); |
duke@435 | 3060 | |
duke@435 | 3061 | const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx); |
duke@435 | 3062 | const size_t partial_obj_size = src_chunk_ptr->partial_obj_size(); |
duke@435 | 3063 | HeapWord* const src_chunk_destination = src_chunk_ptr->destination(); |
duke@435 | 3064 | |
duke@435 | 3065 | assert(dest_addr >= src_chunk_destination, "wrong src chunk"); |
duke@435 | 3066 | assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty"); |
duke@435 | 3067 | |
duke@435 | 3068 | HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx); |
duke@435 | 3069 | HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize; |
duke@435 | 3070 | |
duke@435 | 3071 | HeapWord* addr = src_chunk_beg; |
duke@435 | 3072 | if (dest_addr == src_chunk_destination) { |
duke@435 | 3073 | // Return the first live word in the source chunk. |
duke@435 | 3074 | if (partial_obj_size == 0) { |
duke@435 | 3075 | addr = bitmap->find_obj_beg(addr, src_chunk_end); |
duke@435 | 3076 | assert(addr < src_chunk_end, "no objects start in src chunk"); |
duke@435 | 3077 | } |
duke@435 | 3078 | return addr; |
duke@435 | 3079 | } |
duke@435 | 3080 | |
duke@435 | 3081 | // Must skip some live data. |
duke@435 | 3082 | size_t words_to_skip = dest_addr - src_chunk_destination; |
duke@435 | 3083 | assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk"); |
duke@435 | 3084 | |
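// Illustrative case: if the chunk begins with a 5-word partial object and
// words_to_skip is 3, the skip lands inside the partial object, so the
// branch below simply returns src_chunk_beg + 3.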
duke@435 | 3085 | if (partial_obj_size >= words_to_skip) { |
duke@435 | 3086 | // All the live words to skip are part of the partial object. |
duke@435 | 3087 | addr += words_to_skip; |
duke@435 | 3088 | if (partial_obj_size == words_to_skip) { |
duke@435 | 3089 | // Find the first live word past the partial object. |
duke@435 | 3090 | addr = bitmap->find_obj_beg(addr, src_chunk_end); |
duke@435 | 3091 | assert(addr < src_chunk_end, "wrong src chunk"); |
duke@435 | 3092 | } |
duke@435 | 3093 | return addr; |
duke@435 | 3094 | } |
duke@435 | 3095 | |
duke@435 | 3096 | // Skip over the partial object (if any). |
duke@435 | 3097 | if (partial_obj_size != 0) { |
duke@435 | 3098 | words_to_skip -= partial_obj_size; |
duke@435 | 3099 | addr += partial_obj_size; |
duke@435 | 3100 | } |
duke@435 | 3101 | |
duke@435 | 3102 | // Skip over live words due to objects that start in the chunk. |
duke@435 | 3103 | addr = skip_live_words(addr, src_chunk_end, words_to_skip); |
duke@435 | 3104 | assert(addr < src_chunk_end, "wrong src chunk"); |
duke@435 | 3105 | return addr; |
duke@435 | 3106 | } |
duke@435 | 3107 | |
duke@435 | 3108 | void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm, |
duke@435 | 3109 | size_t beg_chunk, |
duke@435 | 3110 | HeapWord* end_addr) |
duke@435 | 3111 | { |
duke@435 | 3112 | ParallelCompactData& sd = summary_data(); |
duke@435 | 3113 | ChunkData* const beg = sd.chunk(beg_chunk); |
duke@435 | 3114 | HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr); |
duke@435 | 3115 | ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up); |
duke@435 | 3116 | size_t cur_idx = beg_chunk; |
duke@435 | 3117 | for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) { |
duke@435 | 3118 | assert(cur->data_size() > 0, "chunk must have live data"); |
duke@435 | 3119 | cur->decrement_destination_count(); |
duke@435 | 3120 | if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) { |
duke@435 | 3121 | cm->save_for_processing(cur_idx); |
duke@435 | 3122 | } |
duke@435 | 3123 | } |
duke@435 | 3124 | } |
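// Restating the protocol above (no new behavior): every chunk in
// [beg_chunk, chunk containing end_addr) has its destination count
// decremented once; a chunk that satisfies cur_idx <= cur->source_chunk(),
// reports available(), and wins the claim() race is saved on the
// compaction manager for later filling.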
duke@435 | 3125 | |
duke@435 | 3126 | size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure, |
duke@435 | 3127 | SpaceId& src_space_id, |
duke@435 | 3128 | HeapWord*& src_space_top, |
duke@435 | 3129 | HeapWord* end_addr) |
duke@435 | 3130 | { |
duke@435 | 3131 | typedef ParallelCompactData::ChunkData ChunkData; |
duke@435 | 3132 | |
duke@435 | 3133 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 3134 | const size_t chunk_size = ParallelCompactData::ChunkSize; |
duke@435 | 3135 | |
duke@435 | 3136 | size_t src_chunk_idx = 0; |
duke@435 | 3137 | |
duke@435 | 3138 | // Skip empty chunks (if any) up to the top of the space. |
duke@435 | 3139 | HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr); |
duke@435 | 3140 | ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up); |
duke@435 | 3141 | HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top); |
duke@435 | 3142 | const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up); |
duke@435 | 3143 | while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) { |
duke@435 | 3144 | ++src_chunk_ptr; |
duke@435 | 3145 | } |
duke@435 | 3146 | |
duke@435 | 3147 | if (src_chunk_ptr < top_chunk_ptr) { |
duke@435 | 3148 | // The next source chunk is in the current space. Update src_chunk_idx and |
duke@435 | 3149 | // the source address to match src_chunk_ptr. |
duke@435 | 3150 | src_chunk_idx = sd.chunk(src_chunk_ptr); |
duke@435 | 3151 | HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx); |
duke@435 | 3152 | if (src_chunk_addr > closure.source()) { |
duke@435 | 3153 | closure.set_source(src_chunk_addr); |
duke@435 | 3154 | } |
duke@435 | 3155 | return src_chunk_idx; |
duke@435 | 3156 | } |
duke@435 | 3157 | |
duke@435 | 3158 | // Switch to a new source space and find the first non-empty chunk. |
duke@435 | 3159 | unsigned int space_id = src_space_id + 1; |
duke@435 | 3160 | assert(space_id < last_space_id, "not enough spaces"); |
duke@435 | 3161 | |
duke@435 | 3162 | HeapWord* const destination = closure.destination(); |
duke@435 | 3163 | |
duke@435 | 3164 | do { |
duke@435 | 3165 | MutableSpace* space = _space_info[space_id].space(); |
duke@435 | 3166 | HeapWord* const bottom = space->bottom(); |
duke@435 | 3167 | const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom); |
duke@435 | 3168 | |
duke@435 | 3169 | // Iterate over the spaces that do not compact into themselves. |
duke@435 | 3170 | if (bottom_cp->destination() != bottom) { |
duke@435 | 3171 | HeapWord* const top_aligned_up = sd.chunk_align_up(space->top()); |
duke@435 | 3172 | const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up); |
duke@435 | 3173 | |
duke@435 | 3174 | for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) { |
duke@435 | 3175 | if (src_cp->live_obj_size() > 0) { |
duke@435 | 3176 | // Found it. |
duke@435 | 3177 | assert(src_cp->destination() == destination, |
duke@435 | 3178 | "first live obj in the space must match the destination"); |
duke@435 | 3179 | assert(src_cp->partial_obj_size() == 0, |
duke@435 | 3180 | "a space cannot begin with a partial obj"); |
duke@435 | 3181 | |
duke@435 | 3182 | src_space_id = SpaceId(space_id); |
duke@435 | 3183 | src_space_top = space->top(); |
duke@435 | 3184 | const size_t src_chunk_idx = sd.chunk(src_cp); |
duke@435 | 3185 | closure.set_source(sd.chunk_to_addr(src_chunk_idx)); |
duke@435 | 3186 | return src_chunk_idx; |
duke@435 | 3187 | } else { |
duke@435 | 3188 | assert(src_cp->data_size() == 0, "sanity"); |
duke@435 | 3189 | } |
duke@435 | 3190 | } |
duke@435 | 3191 | } |
duke@435 | 3192 | } while (++space_id < last_space_id); |
duke@435 | 3193 | |
duke@435 | 3194 | assert(false, "no source chunk was found"); |
duke@435 | 3195 | return 0; |
duke@435 | 3196 | } |
duke@435 | 3197 | |
duke@435 | 3198 | void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx) |
duke@435 | 3199 | { |
duke@435 | 3200 | typedef ParMarkBitMap::IterationStatus IterationStatus; |
duke@435 | 3201 | const size_t ChunkSize = ParallelCompactData::ChunkSize; |
duke@435 | 3202 | ParMarkBitMap* const bitmap = mark_bitmap(); |
duke@435 | 3203 | ParallelCompactData& sd = summary_data(); |
duke@435 | 3204 | ChunkData* const chunk_ptr = sd.chunk(chunk_idx); |
duke@435 | 3205 | |
duke@435 | 3206 | // Get the items needed to construct the closure. |
duke@435 | 3207 | HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx); |
duke@435 | 3208 | SpaceId dest_space_id = space_id(dest_addr); |
duke@435 | 3209 | ObjectStartArray* start_array = _space_info[dest_space_id].start_array(); |
duke@435 | 3210 | HeapWord* new_top = _space_info[dest_space_id].new_top(); |
duke@435 | 3211 | assert(dest_addr < new_top, "sanity"); |
duke@435 | 3212 | const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize); |
duke@435 | 3213 | |
duke@435 | 3214 | // Get the source chunk and related info. |
duke@435 | 3215 | size_t src_chunk_idx = chunk_ptr->source_chunk(); |
duke@435 | 3216 | SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx)); |
duke@435 | 3217 | HeapWord* src_space_top = _space_info[src_space_id].space()->top(); |
duke@435 | 3218 | |
duke@435 | 3219 | MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); |
duke@435 | 3220 | closure.set_source(first_src_addr(dest_addr, src_chunk_idx)); |
duke@435 | 3221 | |
duke@435 | 3222 | // Adjust src_chunk_idx to prepare for decrementing destination counts (the |
duke@435 | 3223 | // destination count is not decremented when a chunk is copied to itself). |
duke@435 | 3224 | if (src_chunk_idx == chunk_idx) { |
duke@435 | 3225 | src_chunk_idx += 1; |
duke@435 | 3226 | } |
duke@435 | 3227 | |
duke@435 | 3228 | if (bitmap->is_unmarked(closure.source())) { |
duke@435 | 3229 | // The first source word is in the middle of an object; copy the remainder |
duke@435 | 3230 | // of the object or as much as will fit. The fact that pointer updates were |
duke@435 | 3231 | // deferred will be noted when the object header is processed. |
duke@435 | 3232 | HeapWord* const old_src_addr = closure.source(); |
duke@435 | 3233 | closure.copy_partial_obj(); |
duke@435 | 3234 | if (closure.is_full()) { |
duke@435 | 3235 | decrement_destination_counts(cm, src_chunk_idx, closure.source()); |
duke@435 | 3236 | chunk_ptr->set_deferred_obj_addr(NULL); |
duke@435 | 3237 | chunk_ptr->set_completed(); |
duke@435 | 3238 | return; |
duke@435 | 3239 | } |
duke@435 | 3240 | |
duke@435 | 3241 | HeapWord* const end_addr = sd.chunk_align_down(closure.source()); |
duke@435 | 3242 | if (sd.chunk_align_down(old_src_addr) != end_addr) { |
duke@435 | 3243 | // The partial object was copied from more than one source chunk. |
duke@435 | 3244 | decrement_destination_counts(cm, src_chunk_idx, end_addr); |
duke@435 | 3245 | |
duke@435 | 3246 | // Move to the next source chunk, possibly switching spaces as well. All |
duke@435 | 3247 | // args except end_addr may be modified. |
duke@435 | 3248 | src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, |
duke@435 | 3249 | end_addr); |
duke@435 | 3250 | } |
duke@435 | 3251 | } |
duke@435 | 3252 | |
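// Main copying loop: each iteration consumes source words up to the next
// chunk boundary (or src_space_top). It exits when the destination chunk
// is full, either exactly at an object boundary (full) or mid-object
// (would_overflow), in which case the object's destination is recorded so
// its interior oop updates can be deferred.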
duke@435 | 3253 | do { |
duke@435 | 3254 | HeapWord* const cur_addr = closure.source(); |
duke@435 | 3255 | HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1), |
duke@435 | 3256 | src_space_top); |
duke@435 | 3257 | IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr); |
duke@435 | 3258 | |
duke@435 | 3259 | if (status == ParMarkBitMap::incomplete) { |
duke@435 | 3260 | // The last obj that starts in the source chunk does not end in the chunk. |
duke@435 | 3261 | assert(closure.source() < end_addr, "sanity");
duke@435 | 3262 | HeapWord* const obj_beg = closure.source(); |
duke@435 | 3263 | HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(), |
duke@435 | 3264 | src_space_top); |
duke@435 | 3265 | HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end); |
duke@435 | 3266 | if (obj_end < range_end) { |
duke@435 | 3267 | // The end was found; the entire object will fit. |
duke@435 | 3268 | status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end)); |
duke@435 | 3269 | assert(status != ParMarkBitMap::would_overflow, "sanity"); |
duke@435 | 3270 | } else { |
duke@435 | 3271 | // The end was not found; the object will not fit. |
duke@435 | 3272 | assert(range_end < src_space_top, "obj cannot cross space boundary"); |
duke@435 | 3273 | status = ParMarkBitMap::would_overflow; |
duke@435 | 3274 | } |
duke@435 | 3275 | } |
duke@435 | 3276 | |
duke@435 | 3277 | if (status == ParMarkBitMap::would_overflow) { |
duke@435 | 3278 | // The last object did not fit. Record that its interior oop updates were
duke@435 | 3279 | // deferred, then copy enough of the object to fill the chunk.
duke@435 | 3280 | chunk_ptr->set_deferred_obj_addr(closure.destination()); |
duke@435 | 3281 | status = closure.copy_until_full(); // copies from closure.source() |
duke@435 | 3282 | |
duke@435 | 3283 | decrement_destination_counts(cm, src_chunk_idx, closure.source()); |
duke@435 | 3284 | chunk_ptr->set_completed(); |
duke@435 | 3285 | return; |
duke@435 | 3286 | } |
duke@435 | 3287 | |
duke@435 | 3288 | if (status == ParMarkBitMap::full) { |
duke@435 | 3289 | decrement_destination_counts(cm, src_chunk_idx, closure.source()); |
duke@435 | 3290 | chunk_ptr->set_deferred_obj_addr(NULL); |
duke@435 | 3291 | chunk_ptr->set_completed(); |
duke@435 | 3292 | return; |
duke@435 | 3293 | } |
duke@435 | 3294 | |
duke@435 | 3295 | decrement_destination_counts(cm, src_chunk_idx, end_addr); |
duke@435 | 3296 | |
duke@435 | 3297 | // Move to the next source chunk, possibly switching spaces as well. All |
duke@435 | 3298 | // args except end_addr may be modified. |
duke@435 | 3299 | src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top, |
duke@435 | 3300 | end_addr); |
duke@435 | 3301 | } while (true); |
duke@435 | 3302 | } |
duke@435 | 3303 | |
duke@435 | 3304 | void |
duke@435 | 3305 | PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) { |
duke@435 | 3306 | const MutableSpace* sp = space(space_id); |
duke@435 | 3307 | if (sp->is_empty()) { |
duke@435 | 3308 | return; |
duke@435 | 3309 | } |
duke@435 | 3310 | |
duke@435 | 3311 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 3312 | ParMarkBitMap* const bitmap = mark_bitmap(); |
duke@435 | 3313 | HeapWord* const dp_addr = dense_prefix(space_id); |
duke@435 | 3314 | HeapWord* beg_addr = sp->bottom(); |
duke@435 | 3315 | HeapWord* end_addr = sp->top(); |
duke@435 | 3316 | |
duke@435 | 3317 | #ifdef ASSERT |
duke@435 | 3318 | assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix"); |
duke@435 | 3319 | if (cm->should_verify_only()) { |
duke@435 | 3320 | VerifyUpdateClosure verify_update(cm, sp); |
duke@435 | 3321 | bitmap->iterate(&verify_update, beg_addr, end_addr); |
duke@435 | 3322 | return; |
duke@435 | 3323 | } |
duke@435 | 3324 | |
duke@435 | 3325 | if (cm->should_reset_only()) { |
duke@435 | 3326 | ResetObjectsClosure reset_objects(cm); |
duke@435 | 3327 | bitmap->iterate(&reset_objects, beg_addr, end_addr); |
duke@435 | 3328 | return; |
duke@435 | 3329 | } |
duke@435 | 3330 | #endif |
duke@435 | 3331 | |
duke@435 | 3332 | const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr); |
duke@435 | 3333 | const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr); |
duke@435 | 3334 | if (beg_chunk < dp_chunk) { |
duke@435 | 3335 | update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk); |
duke@435 | 3336 | } |
duke@435 | 3337 | |
duke@435 | 3338 | // The destination of the first live object that starts in the chunk is one |
duke@435 | 3339 | // past the end of the partial object entering the chunk (if any). |
duke@435 | 3340 | HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk); |
duke@435 | 3341 | HeapWord* const new_top = _space_info[space_id].new_top(); |
duke@435 | 3342 | assert(new_top >= dest_addr, "bad new_top value"); |
duke@435 | 3343 | const size_t words = pointer_delta(new_top, dest_addr); |
duke@435 | 3344 | |
duke@435 | 3345 | if (words > 0) { |
duke@435 | 3346 | ObjectStartArray* start_array = _space_info[space_id].start_array(); |
duke@435 | 3347 | MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); |
duke@435 | 3348 | |
duke@435 | 3349 | ParMarkBitMap::IterationStatus status; |
duke@435 | 3350 | status = bitmap->iterate(&closure, dest_addr, end_addr); |
duke@435 | 3351 | assert(status == ParMarkBitMap::full, "iteration not complete"); |
duke@435 | 3352 | assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr, |
duke@435 | 3353 | "live objects skipped because closure is full"); |
duke@435 | 3354 | } |
duke@435 | 3355 | } |
duke@435 | 3356 | |
duke@435 | 3357 | jlong PSParallelCompact::millis_since_last_gc() { |
duke@435 | 3358 | jlong ret_val = os::javaTimeMillis() - _time_of_last_gc; |
duke@435 | 3359 | // XXX See note in genCollectedHeap::millis_since_last_gc(). |
duke@435 | 3360 | if (ret_val < 0) { |
duke@435 | 3361 | NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)  // ret_val is a jlong
duke@435 | 3362 | return 0; |
duke@435 | 3363 | } |
duke@435 | 3364 | return ret_val; |
duke@435 | 3365 | } |
duke@435 | 3366 | |
duke@435 | 3367 | void PSParallelCompact::reset_millis_since_last_gc() { |
duke@435 | 3368 | _time_of_last_gc = os::javaTimeMillis(); |
duke@435 | 3369 | } |
duke@435 | 3370 | |
duke@435 | 3371 | ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full() |
duke@435 | 3372 | { |
duke@435 | 3373 | if (source() != destination()) { |
duke@435 | 3374 | assert(source() > destination(), "must copy to the left"); |
duke@435 | 3375 | Copy::aligned_conjoint_words(source(), destination(), words_remaining()); |
duke@435 | 3376 | } |
duke@435 | 3377 | update_state(words_remaining()); |
duke@435 | 3378 | assert(is_full(), "sanity"); |
duke@435 | 3379 | return ParMarkBitMap::full; |
duke@435 | 3380 | } |
duke@435 | 3381 | |
duke@435 | 3382 | void MoveAndUpdateClosure::copy_partial_obj() |
duke@435 | 3383 | { |
duke@435 | 3384 | size_t words = words_remaining(); |
duke@435 | 3385 | |
duke@435 | 3386 | HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end()); |
duke@435 | 3387 | HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end); |
duke@435 | 3388 | if (end_addr < range_end) { |
duke@435 | 3389 | words = bitmap()->obj_size(source(), end_addr); |
duke@435 | 3390 | } |
duke@435 | 3391 | |
duke@435 | 3392 | // This test is necessary; if omitted, the pointer updates to a partial object |
duke@435 | 3393 | // that crosses the dense prefix boundary could be overwritten. |
duke@435 | 3394 | if (source() != destination()) { |
duke@435 | 3395 | assert(source() > destination(), "must copy to the left"); |
duke@435 | 3396 | Copy::aligned_conjoint_words(source(), destination(), words); |
duke@435 | 3397 | } |
duke@435 | 3398 | update_state(words); |
duke@435 | 3399 | } |
duke@435 | 3400 | |
duke@435 | 3401 | ParMarkBitMapClosure::IterationStatus |
duke@435 | 3402 | MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) { |
duke@435 | 3403 | assert(destination() != NULL, "sanity"); |
duke@435 | 3404 | assert(bitmap()->obj_size(addr) == words, "bad size"); |
duke@435 | 3405 | |
duke@435 | 3406 | _source = addr; |
duke@435 | 3407 | assert(PSParallelCompact::summary_data().calc_new_pointer(source()) == |
duke@435 | 3408 | destination(), "wrong destination"); |
duke@435 | 3409 | |
duke@435 | 3410 | if (words > words_remaining()) { |
duke@435 | 3411 | return ParMarkBitMap::would_overflow; |
duke@435 | 3412 | } |
duke@435 | 3413 | |
duke@435 | 3414 | // The start_array must be updated even if the object is not moving. |
duke@435 | 3415 | if (_start_array != NULL) { |
duke@435 | 3416 | _start_array->allocate_block(destination()); |
duke@435 | 3417 | } |
duke@435 | 3418 | |
duke@435 | 3419 | if (destination() != source()) { |
duke@435 | 3420 | assert(destination() < source(), "must copy to the left"); |
duke@435 | 3421 | Copy::aligned_conjoint_words(source(), destination(), words); |
duke@435 | 3422 | } |
duke@435 | 3423 | |
duke@435 | 3424 | oop moved_oop = (oop) destination(); |
duke@435 | 3425 | moved_oop->update_contents(compaction_manager()); |
duke@435 | 3426 | assert(moved_oop->is_oop_or_null(), "Object should be whole at this point"); |
duke@435 | 3427 | |
duke@435 | 3428 | update_state(words); |
duke@435 | 3429 | assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity"); |
duke@435 | 3430 | return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete; |
duke@435 | 3431 | } |
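// Worked example of the overflow path (hypothetical numbers): with
// words_remaining() == 50, an incoming 70-word object makes do_addr()
// return would_overflow before anything is copied; fill_chunk() then
// records the deferred object address and calls copy_until_full(), which
// moves the first 50 words and leaves the rest for the next destination
// chunk.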
duke@435 | 3432 | |
duke@435 | 3433 | UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm, |
duke@435 | 3434 | ParCompactionManager* cm, |
duke@435 | 3435 | PSParallelCompact::SpaceId space_id) : |
duke@435 | 3436 | ParMarkBitMapClosure(mbm, cm), |
duke@435 | 3437 | _space_id(space_id), |
duke@435 | 3438 | _start_array(PSParallelCompact::start_array(space_id)) |
duke@435 | 3439 | { |
duke@435 | 3440 | } |
duke@435 | 3441 | |
duke@435 | 3442 | // Updates the references in the object to their new values. |
duke@435 | 3443 | ParMarkBitMapClosure::IterationStatus |
duke@435 | 3444 | UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) { |
duke@435 | 3445 | do_addr(addr); |
duke@435 | 3446 | return ParMarkBitMap::incomplete; |
duke@435 | 3447 | } |
duke@435 | 3448 | |
duke@435 | 3449 | BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm, |
duke@435 | 3450 | ParCompactionManager* cm, |
duke@435 | 3451 | size_t chunk_index) : |
duke@435 | 3452 | ParMarkBitMapClosure(mbm, cm), |
duke@435 | 3453 | _live_data_left(0), |
duke@435 | 3454 | _cur_block(0) { |
duke@435 | 3455 | _chunk_start = |
duke@435 | 3456 | PSParallelCompact::summary_data().chunk_to_addr(chunk_index); |
duke@435 | 3457 | _chunk_end = |
duke@435 | 3458 | PSParallelCompact::summary_data().chunk_to_addr(chunk_index) + |
duke@435 | 3459 | ParallelCompactData::ChunkSize; |
duke@435 | 3460 | _chunk_index = chunk_index; |
duke@435 | 3461 | _cur_block = |
duke@435 | 3462 | PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start); |
duke@435 | 3463 | } |
duke@435 | 3464 | |
duke@435 | 3465 | bool BitBlockUpdateClosure::chunk_contains_cur_block() { |
duke@435 | 3466 | return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block); |
duke@435 | 3467 | } |
duke@435 | 3468 | |
duke@435 | 3469 | void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) { |
duke@435 | 3470 | DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);) |
duke@435 | 3471 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 3472 | _chunk_index = chunk_index; |
duke@435 | 3473 | _live_data_left = 0; |
duke@435 | 3474 | _chunk_start = sd.chunk_to_addr(chunk_index); |
duke@435 | 3475 | _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize; |
duke@435 | 3476 | |
duke@435 | 3477 | // The first block in this chunk |
duke@435 | 3478 | size_t first_block = sd.addr_to_block_idx(_chunk_start); |
duke@435 | 3479 | size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size(); |
duke@435 | 3480 | |
duke@435 | 3481 | // Set the offset to 0. By definition it should have that value |
duke@435 | 3482 | // but it may have been written while processing an earlier chunk. |
duke@435 | 3483 | if (partial_live_size == 0) { |
duke@435 | 3484 | // No live object extends onto the chunk. The first bit |
duke@435 | 3485 | // in the bit map for the first chunk must be a start bit. |
duke@435 | 3486 | // Although there may not be any marked bits, it is safe |
duke@435 | 3487 | // to set it as a start bit. |
duke@435 | 3488 | sd.block(first_block)->set_start_bit_offset(0); |
duke@435 | 3489 | sd.block(first_block)->set_first_is_start_bit(true); |
duke@435 | 3490 | } else if (sd.partial_obj_ends_in_block(first_block)) { |
duke@435 | 3491 | sd.block(first_block)->set_end_bit_offset(0); |
duke@435 | 3492 | sd.block(first_block)->set_first_is_start_bit(false); |
duke@435 | 3493 | } else { |
duke@435 | 3494 | // The partial object extends beyond the first block. |
duke@435 | 3495 | // There is no object starting in the first block |
duke@435 | 3496 | // so the offset and bit parity are not needed. |
duke@435 | 3497 | // Set the bit parity anyway so that the assertions
duke@435 | 3498 | // hold when no bit is found.
duke@435 | 3499 | sd.block(first_block)->set_end_bit_offset(0); |
duke@435 | 3500 | sd.block(first_block)->set_first_is_start_bit(false); |
duke@435 | 3501 | } |
duke@435 | 3502 | _cur_block = first_block; |
duke@435 | 3503 | #ifdef ASSERT |
duke@435 | 3504 | if (sd.block(first_block)->first_is_start_bit()) { |
duke@435 | 3505 | assert(!sd.partial_obj_ends_in_block(first_block), |
duke@435 | 3506 | "Partial object cannot end in first block"); |
duke@435 | 3507 | } |
duke@435 | 3508 | |
duke@435 | 3509 | if (PrintGCDetails && Verbose) { |
duke@435 | 3510 | if (partial_live_size == 1) { |
duke@435 | 3511 | gclog_or_tty->print_cr("first_block " PTR_FORMAT |
duke@435 | 3512 | " _offset " PTR_FORMAT |
duke@435 | 3513 | " _first_is_start_bit %d", |
duke@435 | 3514 | first_block, |
duke@435 | 3515 | sd.block(first_block)->raw_offset(), |
duke@435 | 3516 | sd.block(first_block)->first_is_start_bit()); |
duke@435 | 3517 | } |
duke@435 | 3518 | } |
duke@435 | 3519 | #endif |
duke@435 | 3520 | DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);) |
duke@435 | 3521 | } |
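// Summary of the three first-block cases handled above: no partial object
// entering the chunk -> the first bit is a start bit at offset 0; the
// partial object ends in the first block -> the first bit is an end bit at
// offset 0; the partial object spans the entire first block -> no bit
// starts there, and the parity is set only so the assertions hold.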
duke@435 | 3522 | |
duke@435 | 3523 | // This method is called when an object has been found (both beginning
duke@435 | 3524 | // and end of the object) in the range of iteration. It calculates
duke@435 | 3525 | // the words of live data to the left of a block. That live
duke@435 | 3526 | // data includes any object starting to the left of the block (i.e., |
duke@435 | 3527 | // the live-data-to-the-left of block AAA will include the full size |
duke@435 | 3528 | // of any object entering AAA). |
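// For illustration (hypothetical numbers, with BlockSize = 128 words): if a
// 200-word object starts 80 words before block AAA and ends inside it, the
// live-data-left recorded for AAA counts all 200 words of that object, not
// just the 120 words that actually overlap AAA.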
duke@435 | 3529 | |
duke@435 | 3530 | ParMarkBitMapClosure::IterationStatus |
duke@435 | 3531 | BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) { |
duke@435 | 3532 | // add the size to the block data. |
duke@435 | 3533 | HeapWord* obj = addr; |
duke@435 | 3534 | ParallelCompactData& sd = PSParallelCompact::summary_data(); |
duke@435 | 3535 | |
duke@435 | 3536 | assert(bitmap()->obj_size(obj) == words, "bad size"); |
duke@435 | 3537 | assert(_chunk_start <= obj, "object is not in chunk"); |
duke@435 | 3538 | assert(obj + words <= _chunk_end, "object is not in chunk"); |
duke@435 | 3539 | |
duke@435 | 3540 | // Update the live data to the left |
duke@435 | 3541 | size_t prev_live_data_left = _live_data_left; |
duke@435 | 3542 | _live_data_left += words;
duke@435 | 3543 | |
duke@435 | 3544 | // Is this object in the current block?
duke@435 | 3545 | size_t block_of_obj = sd.addr_to_block_idx(obj); |
duke@435 | 3546 | size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1); |
duke@435 | 3547 | HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last); |
duke@435 | 3548 | if (_cur_block < block_of_obj) { |
duke@435 | 3549 | |
duke@435 | 3550 | // |
duke@435 | 3551 | // No object crossed the block boundary and this object was found |
duke@435 | 3552 | // on the other side of the block boundary. Update the offset for |
duke@435 | 3553 | // the new block with the data size that does not include this object. |
duke@435 | 3554 | // |
duke@435 | 3555 | // The first bit in block_of_obj is a start bit except in the |
duke@435 | 3556 | // case where the partial object for the chunk extends into |
duke@435 | 3557 | // this block. |
duke@435 | 3558 | if (sd.partial_obj_ends_in_block(block_of_obj)) { |
duke@435 | 3559 | sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left); |
duke@435 | 3560 | } else { |
duke@435 | 3561 | sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left); |
duke@435 | 3562 | } |
duke@435 | 3563 | |
duke@435 | 3564 | // Does this object pass beyond its block?
duke@435 | 3565 | if (block_of_obj < block_of_obj_last) { |
duke@435 | 3566 | // Object crosses block boundary. Two blocks need to be updated:
duke@435 | 3567 | // the current block where the object started |
duke@435 | 3568 | // the block where the object ends |
duke@435 | 3569 | // |
duke@435 | 3570 | // The offset for blocks with no objects starting in them |
duke@435 | 3571 | // (e.g., blocks between _cur_block and block_of_obj_last) |
duke@435 | 3572 | // should not be needed. |
duke@435 | 3573 | // Note that block_of_obj_last may be in another chunk. If so, |
duke@435 | 3574 | // it should be overwritten later. This is a problem (writing
duke@435 | 3575 | // into a block in a later chunk) for parallel execution. |
duke@435 | 3576 | assert(obj < block_of_obj_last_addr, |
duke@435 | 3577 | "Object should start in previous block"); |
duke@435 | 3578 | |
duke@435 | 3579 | // obj crosses into block_of_obj_last, so the first bit
duke@435 | 3580 | // is an end bit.
duke@435 | 3581 | sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); |
duke@435 | 3582 | |
duke@435 | 3583 | _cur_block = block_of_obj_last; |
duke@435 | 3584 | } else { |
duke@435 | 3585 | // _first_is_start_bit has already been set correctly |
duke@435 | 3586 | // in the if-then-else above so don't reset it here. |
duke@435 | 3587 | _cur_block = block_of_obj; |
duke@435 | 3588 | } |
duke@435 | 3589 | } else { |
duke@435 | 3590 | // The current block only changes if the object extends beyond
duke@435 | 3591 | // the block it starts in. |
duke@435 | 3592 | // |
duke@435 | 3593 | // The object starts in the current block. |
duke@435 | 3594 | // Does this object pass beyond the end of it? |
duke@435 | 3595 | if (block_of_obj < block_of_obj_last) { |
duke@435 | 3596 | // Object crosses block boundary. |
duke@435 | 3597 | // See note above on possible blocks between block_of_obj and |
duke@435 | 3598 | // block_of_obj_last |
duke@435 | 3599 | assert(obj < block_of_obj_last_addr, |
duke@435 | 3600 | "Object should start in previous block"); |
duke@435 | 3601 | |
duke@435 | 3602 | sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left); |
duke@435 | 3603 | |
duke@435 | 3604 | _cur_block = block_of_obj_last; |
duke@435 | 3605 | } |
duke@435 | 3606 | } |
duke@435 | 3607 | |
duke@435 | 3608 | // Return incomplete if there are more blocks to be done. |
duke@435 | 3609 | if (chunk_contains_cur_block()) { |
duke@435 | 3610 | return ParMarkBitMap::incomplete; |
duke@435 | 3611 | } |
duke@435 | 3612 | return ParMarkBitMap::complete; |
duke@435 | 3613 | } |
duke@435 | 3614 | |
duke@435 | 3615 | // Verify the new location using the forwarding pointer |
duke@435 | 3616 | // from MarkSweep::mark_sweep_phase2(). Set the mark_word |
duke@435 | 3617 | // to the initial value. |
duke@435 | 3618 | ParMarkBitMapClosure::IterationStatus |
duke@435 | 3619 | PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) { |
duke@435 | 3620 | // The second arg (words) is not used. |
duke@435 | 3621 | oop obj = (oop) addr; |
duke@435 | 3622 | HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer(); |
duke@435 | 3623 | HeapWord* new_pointer = summary_data().calc_new_pointer(obj); |
duke@435 | 3624 | if (forwarding_ptr == NULL) { |
duke@435 | 3625 | // The object is dead or not moving. |
duke@435 | 3626 | assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj), |
duke@435 | 3627 | "Object liveness is wrong."); |
duke@435 | 3628 | return ParMarkBitMap::incomplete; |
duke@435 | 3629 | } |
duke@435 | 3630 | assert(UseParallelOldGCDensePrefix || |
duke@435 | 3631 | (HeapMaximumCompactionInterval > 1) || |
duke@435 | 3632 | (MarkSweepAlwaysCompactCount > 1) || |
duke@435 | 3633 | (forwarding_ptr == new_pointer), |
duke@435 | 3634 | "Calculation of new location is incorrect"); |
duke@435 | 3635 | return ParMarkBitMap::incomplete; |
duke@435 | 3636 | } |
duke@435 | 3637 | |
duke@435 | 3638 | // Reset objects modified for debug checking. |
duke@435 | 3639 | ParMarkBitMapClosure::IterationStatus |
duke@435 | 3640 | PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) { |
duke@435 | 3641 | // The second arg (words) is not used. |
duke@435 | 3642 | oop obj = (oop) addr; |
duke@435 | 3643 | obj->init_mark(); |
duke@435 | 3644 | return ParMarkBitMap::incomplete; |
duke@435 | 3645 | } |
duke@435 | 3646 | |
duke@435 | 3647 | // Prepare for compaction. This method is executed once |
duke@435 | 3648 | // (i.e., by a single thread) before compaction. |
duke@435 | 3649 | // Save the updated location of the intArrayKlassObj for |
duke@435 | 3650 | // filling holes in the dense prefix. |
duke@435 | 3651 | void PSParallelCompact::compact_prologue() { |
duke@435 | 3652 | _updated_int_array_klass_obj = (klassOop) |
duke@435 | 3653 | summary_data().calc_new_pointer(Universe::intArrayKlassObj()); |
duke@435 | 3654 | } |
duke@435 | 3655 | |
duke@435 | 3656 | // The initial implementation of this method created a field |
duke@435 | 3657 | // _next_compaction_space_id in SpaceInfo and initialized |
duke@435 | 3658 | // that field in SpaceInfo::initialize_space_info(). That |
duke@435 | 3659 | // required that _next_compaction_space_id be declared a |
duke@435 | 3660 | // SpaceId in SpaceInfo and that would have required that |
duke@435 | 3661 | // either SpaceId be declared in a separate class or that |
duke@435 | 3662 | // it be declared in SpaceInfo. It didn't seem consistent |
duke@435 | 3663 | // to declare it in SpaceInfo (didn't really fit logically). |
duke@435 | 3664 | // Alternatively, defining a separate class to define SpaceId |
duke@435 | 3665 | // seemed excessive. This implementation is simple and localizes
duke@435 | 3666 | // the knowledge. |
duke@435 | 3667 | |
duke@435 | 3668 | PSParallelCompact::SpaceId |
duke@435 | 3669 | PSParallelCompact::next_compaction_space_id(SpaceId id) { |
duke@435 | 3670 | assert(id < last_space_id, "id out of range"); |
duke@435 | 3671 | switch (id) { |
duke@435 | 3672 | case perm_space_id : |
duke@435 | 3673 | return last_space_id; |
duke@435 | 3674 | case old_space_id : |
duke@435 | 3675 | return eden_space_id; |
duke@435 | 3676 | case eden_space_id : |
duke@435 | 3677 | return from_space_id; |
duke@435 | 3678 | case from_space_id : |
duke@435 | 3679 | return to_space_id; |
duke@435 | 3680 | case to_space_id : |
duke@435 | 3681 | return last_space_id; |
duke@435 | 3682 | default: |
duke@435 | 3683 | assert(false, "Bad space id"); |
duke@435 | 3684 | return last_space_id; |
duke@435 | 3685 | } |
duke@435 | 3686 | } |
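// The mapping above yields the compaction order old -> eden -> from -> to;
// perm_space_id has no successor (last_space_id terminates the walk).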
duke@435 | 3687 | |
duke@435 | 3688 | // Here temporarily for debugging |
duke@435 | 3689 | #ifdef ASSERT |
duke@435 | 3690 | size_t ParallelCompactData::block_idx(BlockData* block) { |
duke@435 | 3691 | size_t index = pointer_delta(block, |
duke@435 | 3692 | PSParallelCompact::summary_data()._block_data, sizeof(BlockData)); |
duke@435 | 3693 | return index; |
duke@435 | 3694 | } |
duke@435 | 3695 | #endif |