src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

Fri, 11 Jul 2008 16:11:50 -0700

author
jcoomes
date
Fri, 11 Jul 2008 16:11:50 -0700
changeset 701
2214b226b7f0
parent 700
f88815ca1af1
child 704
850fdf70db2b
permissions
-rw-r--r--

6724367: par compact could clear less young gen summary data
Reviewed-by: jmasa, apetrusenko

duke@435 1 /*
duke@435 2 * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_psParallelCompact.cpp.incl"
duke@435 27
duke@435 28 #include <math.h>
duke@435 29
duke@435 30 // All sizes are in HeapWords.
duke@435 31 const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words
duke@435 32 const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize;
duke@435 33 const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
duke@435 34 const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
duke@435 35 const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
duke@435 36 const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;
duke@435 37
duke@435 38 // 32-bit: 128 words covers 4 bitmap words
duke@435 39 // 64-bit: 128 words covers 2 bitmap words
duke@435 40 const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
duke@435 41 const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
duke@435 42 const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
duke@435 43 const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask;
duke@435 44
duke@435 45 const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
duke@435 46
duke@435 47 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 48 ParallelCompactData::ChunkData::dc_shift = 27;
duke@435 49
duke@435 50 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 51 ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;
duke@435 52
duke@435 53 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 54 ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;
duke@435 55
duke@435 56 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 57 ParallelCompactData::ChunkData::los_mask = ~dc_mask;
duke@435 58
duke@435 59 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 60 ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;
duke@435 61
duke@435 62 const ParallelCompactData::ChunkData::chunk_sz_t
duke@435 63 ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
duke@435 64
duke@435 65 #ifdef ASSERT
duke@435 66 short ParallelCompactData::BlockData::_cur_phase = 0;
duke@435 67 #endif
duke@435 68
duke@435 69 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
duke@435 70 bool PSParallelCompact::_print_phases = false;
duke@435 71
duke@435 72 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
duke@435 73 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
duke@435 74
duke@435 75 double PSParallelCompact::_dwl_mean;
duke@435 76 double PSParallelCompact::_dwl_std_dev;
duke@435 77 double PSParallelCompact::_dwl_first_term;
duke@435 78 double PSParallelCompact::_dwl_adjustment;
duke@435 79 #ifdef ASSERT
duke@435 80 bool PSParallelCompact::_dwl_initialized = false;
duke@435 81 #endif // #ifdef ASSERT
duke@435 82
duke@435 83 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 84 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
duke@435 85 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
duke@435 86 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
duke@435 87 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
duke@435 88 size_t PSParallelCompact::_live_oops_index = 0;
duke@435 89 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
coleenp@548 90 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
coleenp@548 91 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
duke@435 92 bool PSParallelCompact::_pointer_tracking = false;
duke@435 93 bool PSParallelCompact::_root_tracking = true;
duke@435 94
duke@435 95 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
duke@435 96 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
duke@435 97 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
duke@435 98 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
duke@435 99 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
duke@435 100 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
duke@435 101 #endif
duke@435 102
duke@435 103 // XXX beg - verification code; only works while we also mark in object headers
duke@435 104 static void
duke@435 105 verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
duke@435 106 {
duke@435 107 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
duke@435 108
duke@435 109 PSPermGen* perm_gen = heap->perm_gen();
duke@435 110 PSOldGen* old_gen = heap->old_gen();
duke@435 111 PSYoungGen* young_gen = heap->young_gen();
duke@435 112
duke@435 113 MutableSpace* perm_space = perm_gen->object_space();
duke@435 114 MutableSpace* old_space = old_gen->object_space();
duke@435 115 MutableSpace* eden_space = young_gen->eden_space();
duke@435 116 MutableSpace* from_space = young_gen->from_space();
duke@435 117 MutableSpace* to_space = young_gen->to_space();
duke@435 118
duke@435 119 // 'from_space' here is the survivor space at the lower address.
duke@435 120 if (to_space->bottom() < from_space->bottom()) {
duke@435 121 from_space = to_space;
duke@435 122 to_space = young_gen->from_space();
duke@435 123 }
duke@435 124
duke@435 125 HeapWord* boundaries[12];
duke@435 126 unsigned int bidx = 0;
duke@435 127 const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);
duke@435 128
duke@435 129 boundaries[0] = perm_space->bottom();
duke@435 130 boundaries[1] = perm_space->top();
duke@435 131 boundaries[2] = old_space->bottom();
duke@435 132 boundaries[3] = old_space->top();
duke@435 133 boundaries[4] = eden_space->bottom();
duke@435 134 boundaries[5] = eden_space->top();
duke@435 135 boundaries[6] = from_space->bottom();
duke@435 136 boundaries[7] = from_space->top();
duke@435 137 boundaries[8] = to_space->bottom();
duke@435 138 boundaries[9] = to_space->top();
duke@435 139 boundaries[10] = to_space->end();
duke@435 140 boundaries[11] = to_space->end();
duke@435 141
duke@435 142 BitMap::idx_t beg_bit = 0;
duke@435 143 BitMap::idx_t end_bit;
duke@435 144 BitMap::idx_t tmp_bit;
duke@435 145 const BitMap::idx_t last_bit = _mark_bitmap.size();
duke@435 146 do {
duke@435 147 HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
duke@435 148 if (_mark_bitmap.is_marked(beg_bit)) {
duke@435 149 oop obj = (oop)addr;
duke@435 150 assert(obj->is_gc_marked(), "obj header is not marked");
duke@435 151 end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
duke@435 152 const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
duke@435 153 assert(size == (size_t)obj->size(), "end bit wrong?");
duke@435 154 beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
duke@435 155 assert(beg_bit > end_bit, "bit set in middle of an obj");
duke@435 156 } else {
duke@435 157 if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
duke@435 158 // a dead object in the current space.
duke@435 159 oop obj = (oop)addr;
duke@435 160 end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
duke@435 161 assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
duke@435 162 tmp_bit = beg_bit + 1;
duke@435 163 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
duke@435 164 assert(beg_bit == end_bit, "beg bit set in unmarked obj");
duke@435 165 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
duke@435 166 assert(beg_bit == end_bit, "end bit set in unmarked obj");
duke@435 167 } else if (addr < boundaries[bidx + 2]) {
duke@435 168 // addr is between top in the current space and bottom in the next.
duke@435 169 end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
duke@435 170 tmp_bit = beg_bit;
duke@435 171 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
duke@435 172 assert(beg_bit == end_bit, "beg bit set above top");
duke@435 173 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
duke@435 174 assert(beg_bit == end_bit, "end bit set above top");
duke@435 175 bidx += 2;
duke@435 176 } else if (bidx < bidx_max - 2) {
duke@435 177 bidx += 2; // ???
duke@435 178 } else {
duke@435 179 tmp_bit = beg_bit;
duke@435 180 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
duke@435 181 assert(beg_bit == last_bit, "beg bit set outside heap");
duke@435 182 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
duke@435 183 assert(beg_bit == last_bit, "end bit set outside heap");
duke@435 184 }
duke@435 185 }
duke@435 186 } while (beg_bit < last_bit);
duke@435 187 }
duke@435 188 // XXX end - verification code; only works while we also mark in object headers
duke@435 189
duke@435 190 #ifndef PRODUCT
duke@435 191 const char* PSParallelCompact::space_names[] = {
duke@435 192 "perm", "old ", "eden", "from", "to "
duke@435 193 };
duke@435 194
duke@435 195 void PSParallelCompact::print_chunk_ranges()
duke@435 196 {
duke@435 197 tty->print_cr("space bottom top end new_top");
duke@435 198 tty->print_cr("------ ---------- ---------- ---------- ----------");
duke@435 199
duke@435 200 for (unsigned int id = 0; id < last_space_id; ++id) {
duke@435 201 const MutableSpace* space = _space_info[id].space();
duke@435 202 tty->print_cr("%u %s "
jcoomes@699 203 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
jcoomes@699 204 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
duke@435 205 id, space_names[id],
duke@435 206 summary_data().addr_to_chunk_idx(space->bottom()),
duke@435 207 summary_data().addr_to_chunk_idx(space->top()),
duke@435 208 summary_data().addr_to_chunk_idx(space->end()),
duke@435 209 summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
duke@435 210 }
duke@435 211 }
duke@435 212
duke@435 213 void
duke@435 214 print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
duke@435 215 {
jcoomes@699 216 #define CHUNK_IDX_FORMAT SIZE_FORMAT_W(7)
jcoomes@699 217 #define CHUNK_DATA_FORMAT SIZE_FORMAT_W(5)
duke@435 218
duke@435 219 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 220 size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
duke@435 221 tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
duke@435 222 CHUNK_IDX_FORMAT " " PTR_FORMAT " "
duke@435 223 CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
duke@435 224 CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
duke@435 225 i, c->data_location(), dci, c->destination(),
duke@435 226 c->partial_obj_size(), c->live_obj_size(),
duke@435 227 c->data_size(), c->source_chunk(), c->destination_count());
duke@435 228
duke@435 229 #undef CHUNK_IDX_FORMAT
duke@435 230 #undef CHUNK_DATA_FORMAT
duke@435 231 }
duke@435 232
duke@435 233 void
duke@435 234 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 235 HeapWord* const beg_addr,
duke@435 236 HeapWord* const end_addr)
duke@435 237 {
duke@435 238 size_t total_words = 0;
duke@435 239 size_t i = summary_data.addr_to_chunk_idx(beg_addr);
duke@435 240 const size_t last = summary_data.addr_to_chunk_idx(end_addr);
duke@435 241 HeapWord* pdest = 0;
duke@435 242
duke@435 243 while (i <= last) {
duke@435 244 ParallelCompactData::ChunkData* c = summary_data.chunk(i);
duke@435 245 if (c->data_size() != 0 || c->destination() != pdest) {
duke@435 246 print_generic_summary_chunk(i, c);
duke@435 247 total_words += c->data_size();
duke@435 248 pdest = c->destination();
duke@435 249 }
duke@435 250 ++i;
duke@435 251 }
duke@435 252
duke@435 253 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
duke@435 254 }
duke@435 255
duke@435 256 void
duke@435 257 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 258 SpaceInfo* space_info)
duke@435 259 {
duke@435 260 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
duke@435 261 const MutableSpace* space = space_info[id].space();
duke@435 262 print_generic_summary_data(summary_data, space->bottom(),
duke@435 263 MAX2(space->top(), space_info[id].new_top()));
duke@435 264 }
duke@435 265 }
duke@435 266
duke@435 267 void
duke@435 268 print_initial_summary_chunk(size_t i,
duke@435 269 const ParallelCompactData::ChunkData* c,
duke@435 270 bool newline = true)
duke@435 271 {
jcoomes@699 272 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
jcoomes@699 273 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
jcoomes@699 274 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
duke@435 275 i, c->destination(),
duke@435 276 c->partial_obj_size(), c->live_obj_size(),
duke@435 277 c->data_size(), c->source_chunk(), c->destination_count());
duke@435 278 if (newline) tty->cr();
duke@435 279 }
duke@435 280
duke@435 281 void
duke@435 282 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 283 const MutableSpace* space) {
duke@435 284 if (space->top() == space->bottom()) {
duke@435 285 return;
duke@435 286 }
duke@435 287
duke@435 288 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 289 HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
duke@435 290 const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
duke@435 291 const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
duke@435 292 HeapWord* end_addr = c->destination() + c->data_size();
duke@435 293 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
duke@435 294
duke@435 295 // Print (and count) the full chunks at the beginning of the space.
duke@435 296 size_t full_chunk_count = 0;
duke@435 297 size_t i = summary_data.addr_to_chunk_idx(space->bottom());
duke@435 298 while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
duke@435 299 print_initial_summary_chunk(i, summary_data.chunk(i));
duke@435 300 ++full_chunk_count;
duke@435 301 ++i;
duke@435 302 }
duke@435 303
duke@435 304 size_t live_to_right = live_in_space - full_chunk_count * chunk_size;
duke@435 305
duke@435 306 double max_reclaimed_ratio = 0.0;
duke@435 307 size_t max_reclaimed_ratio_chunk = 0;
duke@435 308 size_t max_dead_to_right = 0;
duke@435 309 size_t max_live_to_right = 0;
duke@435 310
duke@435 311 // Print the 'reclaimed ratio' for chunks while there is something live in the
duke@435 312 // chunk or to the right of it. The remaining chunks are empty (and
duke@435 313 // uninteresting), and computing the ratio will result in division by 0.
duke@435 314 while (i < end_chunk && live_to_right > 0) {
duke@435 315 c = summary_data.chunk(i);
duke@435 316 HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
duke@435 317 const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
duke@435 318 const size_t dead_to_right = used_to_right - live_to_right;
duke@435 319 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
duke@435 320
duke@435 321 if (reclaimed_ratio > max_reclaimed_ratio) {
duke@435 322 max_reclaimed_ratio = reclaimed_ratio;
duke@435 323 max_reclaimed_ratio_chunk = i;
duke@435 324 max_dead_to_right = dead_to_right;
duke@435 325 max_live_to_right = live_to_right;
duke@435 326 }
duke@435 327
duke@435 328 print_initial_summary_chunk(i, c, false);
jcoomes@699 329 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
duke@435 330 reclaimed_ratio, dead_to_right, live_to_right);
duke@435 331
duke@435 332 live_to_right -= c->data_size();
duke@435 333 ++i;
duke@435 334 }
duke@435 335
duke@435 336 // Any remaining chunks are empty. Print one more if there is one.
duke@435 337 if (i < end_chunk) {
duke@435 338 print_initial_summary_chunk(i, summary_data.chunk(i));
duke@435 339 }
duke@435 340
jcoomes@699 341 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
jcoomes@699 342 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
duke@435 343 max_reclaimed_ratio_chunk, max_dead_to_right,
duke@435 344 max_live_to_right, max_reclaimed_ratio);
duke@435 345 }
duke@435 346
duke@435 347 void
duke@435 348 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 349 SpaceInfo* space_info) {
duke@435 350 unsigned int id = PSParallelCompact::perm_space_id;
duke@435 351 const MutableSpace* space;
duke@435 352 do {
duke@435 353 space = space_info[id].space();
duke@435 354 print_initial_summary_data(summary_data, space);
duke@435 355 } while (++id < PSParallelCompact::eden_space_id);
duke@435 356
duke@435 357 do {
duke@435 358 space = space_info[id].space();
duke@435 359 print_generic_summary_data(summary_data, space->bottom(), space->top());
duke@435 360 } while (++id < PSParallelCompact::last_space_id);
duke@435 361 }
duke@435 362 #endif // #ifndef PRODUCT
duke@435 363
duke@435 364 #ifdef ASSERT
duke@435 365 size_t add_obj_count;
duke@435 366 size_t add_obj_size;
duke@435 367 size_t mark_bitmap_count;
duke@435 368 size_t mark_bitmap_size;
duke@435 369 #endif // #ifdef ASSERT
duke@435 370
duke@435 371 ParallelCompactData::ParallelCompactData()
duke@435 372 {
duke@435 373 _region_start = 0;
duke@435 374
duke@435 375 _chunk_vspace = 0;
duke@435 376 _chunk_data = 0;
duke@435 377 _chunk_count = 0;
duke@435 378
duke@435 379 _block_vspace = 0;
duke@435 380 _block_data = 0;
duke@435 381 _block_count = 0;
duke@435 382 }
duke@435 383
duke@435 384 bool ParallelCompactData::initialize(MemRegion covered_region)
duke@435 385 {
duke@435 386 _region_start = covered_region.start();
duke@435 387 const size_t region_size = covered_region.word_size();
duke@435 388 DEBUG_ONLY(_region_end = _region_start + region_size;)
duke@435 389
duke@435 390 assert(chunk_align_down(_region_start) == _region_start,
duke@435 391 "region start not aligned");
duke@435 392 assert((region_size & ChunkSizeOffsetMask) == 0,
duke@435 393 "region size not a multiple of ChunkSize");
duke@435 394
duke@435 395 bool result = initialize_chunk_data(region_size);
duke@435 396
duke@435 397 // Initialize the block data if it will be used for updating pointers, or if
duke@435 398 // this is a debug build.
duke@435 399 if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
duke@435 400 result = result && initialize_block_data(region_size);
duke@435 401 }
duke@435 402
duke@435 403 return result;
duke@435 404 }
duke@435 405
duke@435 406 PSVirtualSpace*
duke@435 407 ParallelCompactData::create_vspace(size_t count, size_t element_size)
duke@435 408 {
duke@435 409 const size_t raw_bytes = count * element_size;
duke@435 410 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
duke@435 411 const size_t granularity = os::vm_allocation_granularity();
duke@435 412 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
duke@435 413
duke@435 414 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
duke@435 415 MAX2(page_sz, granularity);
jcoomes@514 416 ReservedSpace rs(bytes, rs_align, rs_align > 0);
duke@435 417 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
duke@435 418 rs.size());
duke@435 419 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
duke@435 420 if (vspace != 0) {
duke@435 421 if (vspace->expand_by(bytes)) {
duke@435 422 return vspace;
duke@435 423 }
duke@435 424 delete vspace;
duke@435 425 }
duke@435 426
duke@435 427 return 0;
duke@435 428 }
duke@435 429
duke@435 430 bool ParallelCompactData::initialize_chunk_data(size_t region_size)
duke@435 431 {
duke@435 432 const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
duke@435 433 _chunk_vspace = create_vspace(count, sizeof(ChunkData));
duke@435 434 if (_chunk_vspace != 0) {
duke@435 435 _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
duke@435 436 _chunk_count = count;
duke@435 437 return true;
duke@435 438 }
duke@435 439 return false;
duke@435 440 }
duke@435 441
duke@435 442 bool ParallelCompactData::initialize_block_data(size_t region_size)
duke@435 443 {
duke@435 444 const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
duke@435 445 _block_vspace = create_vspace(count, sizeof(BlockData));
duke@435 446 if (_block_vspace != 0) {
duke@435 447 _block_data = (BlockData*)_block_vspace->reserved_low_addr();
duke@435 448 _block_count = count;
duke@435 449 return true;
duke@435 450 }
duke@435 451 return false;
duke@435 452 }
duke@435 453
duke@435 454 void ParallelCompactData::clear()
duke@435 455 {
duke@435 456 if (_block_data) {
duke@435 457 memset(_block_data, 0, _block_vspace->committed_size());
duke@435 458 }
duke@435 459 memset(_chunk_data, 0, _chunk_vspace->committed_size());
duke@435 460 }
duke@435 461
duke@435 462 void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
duke@435 463 assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
duke@435 464 assert(end_chunk <= _chunk_count, "end_chunk out of range");
duke@435 465 assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");
duke@435 466
duke@435 467 const size_t chunk_cnt = end_chunk - beg_chunk;
duke@435 468
duke@435 469 if (_block_data) {
duke@435 470 const size_t blocks_per_chunk = ChunkSize / BlockSize;
duke@435 471 const size_t beg_block = beg_chunk * blocks_per_chunk;
duke@435 472 const size_t block_cnt = chunk_cnt * blocks_per_chunk;
duke@435 473 memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
duke@435 474 }
duke@435 475 memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
duke@435 476 }
duke@435 477
duke@435 478 HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
duke@435 479 {
duke@435 480 const ChunkData* cur_cp = chunk(chunk_idx);
duke@435 481 const ChunkData* const end_cp = chunk(chunk_count() - 1);
duke@435 482
duke@435 483 HeapWord* result = chunk_to_addr(chunk_idx);
duke@435 484 if (cur_cp < end_cp) {
duke@435 485 do {
duke@435 486 result += cur_cp->partial_obj_size();
duke@435 487 } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
duke@435 488 }
duke@435 489 return result;
duke@435 490 }
duke@435 491
duke@435 492 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
duke@435 493 {
duke@435 494 const size_t obj_ofs = pointer_delta(addr, _region_start);
duke@435 495 const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
duke@435 496 const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;
duke@435 497
duke@435 498 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
duke@435 499 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
duke@435 500
duke@435 501 if (beg_chunk == end_chunk) {
duke@435 502 // All in one chunk.
duke@435 503 _chunk_data[beg_chunk].add_live_obj(len);
duke@435 504 return;
duke@435 505 }
duke@435 506
duke@435 507 // First chunk.
duke@435 508 const size_t beg_ofs = chunk_offset(addr);
duke@435 509 _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);
duke@435 510
duke@435 511 klassOop klass = ((oop)addr)->klass();
duke@435 512 // Middle chunks--completely spanned by this object.
duke@435 513 for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
duke@435 514 _chunk_data[chunk].set_partial_obj_size(ChunkSize);
duke@435 515 _chunk_data[chunk].set_partial_obj_addr(addr);
duke@435 516 }
duke@435 517
duke@435 518 // Last chunk.
duke@435 519 const size_t end_ofs = chunk_offset(addr + len - 1);
duke@435 520 _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
duke@435 521 _chunk_data[end_chunk].set_partial_obj_addr(addr);
duke@435 522 }
duke@435 523
duke@435 524 void
duke@435 525 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
duke@435 526 {
duke@435 527 assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
duke@435 528 assert(chunk_offset(end) == 0, "not ChunkSize aligned");
duke@435 529
duke@435 530 size_t cur_chunk = addr_to_chunk_idx(beg);
duke@435 531 const size_t end_chunk = addr_to_chunk_idx(end);
duke@435 532 HeapWord* addr = beg;
duke@435 533 while (cur_chunk < end_chunk) {
duke@435 534 _chunk_data[cur_chunk].set_destination(addr);
duke@435 535 _chunk_data[cur_chunk].set_destination_count(0);
duke@435 536 _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
duke@435 537 _chunk_data[cur_chunk].set_data_location(addr);
duke@435 538
duke@435 539 // Update live_obj_size so the chunk appears completely full.
duke@435 540 size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
duke@435 541 _chunk_data[cur_chunk].set_live_obj_size(live_size);
duke@435 542
duke@435 543 ++cur_chunk;
duke@435 544 addr += ChunkSize;
duke@435 545 }
duke@435 546 }
duke@435 547
duke@435 548 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
duke@435 549 HeapWord* source_beg, HeapWord* source_end,
duke@435 550 HeapWord** target_next,
duke@435 551 HeapWord** source_next) {
duke@435 552 // This is too strict.
duke@435 553 // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");
duke@435 554
duke@435 555 if (TraceParallelOldGCSummaryPhase) {
duke@435 556 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
duke@435 557 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
duke@435 558 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
duke@435 559 target_beg, target_end,
duke@435 560 source_beg, source_end,
duke@435 561 target_next != 0 ? *target_next : (HeapWord*) 0,
duke@435 562 source_next != 0 ? *source_next : (HeapWord*) 0);
duke@435 563 }
duke@435 564
duke@435 565 size_t cur_chunk = addr_to_chunk_idx(source_beg);
duke@435 566 const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));
duke@435 567
duke@435 568 HeapWord *dest_addr = target_beg;
duke@435 569 while (cur_chunk < end_chunk) {
duke@435 570 size_t words = _chunk_data[cur_chunk].data_size();
duke@435 571
duke@435 572 #if 1
duke@435 573 assert(pointer_delta(target_end, dest_addr) >= words,
duke@435 574 "source region does not fit into target region");
duke@435 575 #else
duke@435 576 // XXX - need some work on the corner cases here. If the chunk does not
duke@435 577 // fit, then must either make sure any partial_obj from the chunk fits, or
duke@435 578 // 'undo' the initial part of the partial_obj that is in the previous chunk.
duke@435 579 if (dest_addr + words >= target_end) {
duke@435 580 // Let the caller know where to continue.
duke@435 581 *target_next = dest_addr;
duke@435 582 *source_next = chunk_to_addr(cur_chunk);
duke@435 583 return false;
duke@435 584 }
duke@435 585 #endif // #if 1
duke@435 586
duke@435 587 _chunk_data[cur_chunk].set_destination(dest_addr);
duke@435 588
duke@435 589 // Set the destination_count for cur_chunk, and if necessary, update
duke@435 590 // source_chunk for a destination chunk. The source_chunk field is updated
duke@435 591 // if cur_chunk is the first (left-most) chunk to be copied to a destination
duke@435 592 // chunk.
duke@435 593 //
duke@435 594 // The destination_count calculation is a bit subtle. A chunk that has data
duke@435 595 // that compacts into itself does not count itself as a destination. This
duke@435 596 // maintains the invariant that a zero count means the chunk is available
duke@435 597 // and can be claimed and then filled.
duke@435 598 if (words > 0) {
duke@435 599 HeapWord* const last_addr = dest_addr + words - 1;
duke@435 600 const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
duke@435 601 const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
duke@435 602 #if 0
duke@435 603 // Initially assume that the destination chunks will be the same and
duke@435 604 // adjust the value below if necessary. Under this assumption, if
duke@435 605 // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely
duke@435 606 // into itself.
duke@435 607 uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
duke@435 608 if (dest_chunk_1 != dest_chunk_2) {
duke@435 609 // Destination chunks differ; adjust destination_count.
duke@435 610 destination_count += 1;
duke@435 611 // Data from cur_chunk will be copied to the start of dest_chunk_2.
duke@435 612 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
duke@435 613 } else if (chunk_offset(dest_addr) == 0) {
duke@435 614 // Data from cur_chunk will be copied to the start of the destination
duke@435 615 // chunk.
duke@435 616 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
duke@435 617 }
duke@435 618 #else
duke@435 619 // Initially assume that the destination chunks will be different and
duke@435 620 // adjust the value below if necessary. Under this assumption, if
duke@435 621 // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially
duke@435 622 // into dest_chunk_1 and partially into itself.
duke@435 623 uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
duke@435 624 if (dest_chunk_1 != dest_chunk_2) {
duke@435 625 // Data from cur_chunk will be copied to the start of dest_chunk_2.
duke@435 626 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
duke@435 627 } else {
duke@435 628 // Destination chunks are the same; adjust destination_count.
duke@435 629 destination_count -= 1;
duke@435 630 if (chunk_offset(dest_addr) == 0) {
duke@435 631 // Data from cur_chunk will be copied to the start of the destination
duke@435 632 // chunk.
duke@435 633 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
duke@435 634 }
duke@435 635 }
duke@435 636 #endif // #if 0
duke@435 637
duke@435 638 _chunk_data[cur_chunk].set_destination_count(destination_count);
duke@435 639 _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
duke@435 640 dest_addr += words;
duke@435 641 }
duke@435 642
duke@435 643 ++cur_chunk;
duke@435 644 }
duke@435 645
duke@435 646 *target_next = dest_addr;
duke@435 647 return true;
duke@435 648 }
duke@435 649
duke@435 650 bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
duke@435 651 HeapWord* block_addr = block_to_addr(block_index);
duke@435 652 HeapWord* block_end_addr = block_addr + BlockSize;
duke@435 653 size_t chunk_index = addr_to_chunk_idx(block_addr);
duke@435 654 HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);
duke@435 655
duke@435 656 // An object that ends at the end of the block, ends
duke@435 657 // in the block (the last word of the object is to
duke@435 658 // the left of the end).
duke@435 659 if ((block_addr < partial_obj_end_addr) &&
duke@435 660 (partial_obj_end_addr <= block_end_addr)) {
duke@435 661 return true;
duke@435 662 }
duke@435 663
duke@435 664 return false;
duke@435 665 }
duke@435 666
duke@435 667 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
duke@435 668 HeapWord* result = NULL;
duke@435 669 if (UseParallelOldGCChunkPointerCalc) {
duke@435 670 result = chunk_calc_new_pointer(addr);
duke@435 671 } else {
duke@435 672 result = block_calc_new_pointer(addr);
duke@435 673 }
duke@435 674 return result;
duke@435 675 }
duke@435 676
duke@435 677 // This method is overly complicated (expensive) to be called
duke@435 678 // for every reference.
duke@435 679 // Try to restructure this so that a NULL is returned if
duke@435 680 // the object is dead. But don't wast the cycles to explicitly check
duke@435 681 // that it is dead since only live objects should be passed in.
duke@435 682
duke@435 683 HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
duke@435 684 assert(addr != NULL, "Should detect NULL oop earlier");
duke@435 685 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
duke@435 686 #ifdef ASSERT
duke@435 687 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
duke@435 688 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
duke@435 689 }
duke@435 690 #endif
duke@435 691 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
duke@435 692
duke@435 693 // Chunk covering the object.
duke@435 694 size_t chunk_index = addr_to_chunk_idx(addr);
duke@435 695 const ChunkData* const chunk_ptr = chunk(chunk_index);
duke@435 696 HeapWord* const chunk_addr = chunk_align_down(addr);
duke@435 697
duke@435 698 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
duke@435 699 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
duke@435 700
duke@435 701 HeapWord* result = chunk_ptr->destination();
duke@435 702
duke@435 703 // If all the data in the chunk is live, then the new location of the object
duke@435 704 // can be calculated from the destination of the chunk plus the offset of the
duke@435 705 // object in the chunk.
duke@435 706 if (chunk_ptr->data_size() == ChunkSize) {
duke@435 707 result += pointer_delta(addr, chunk_addr);
duke@435 708 return result;
duke@435 709 }
duke@435 710
duke@435 711 // The new location of the object is
duke@435 712 // chunk destination +
duke@435 713 // size of the partial object extending onto the chunk +
duke@435 714 // sizes of the live objects in the Chunk that are to the left of addr
duke@435 715 const size_t partial_obj_size = chunk_ptr->partial_obj_size();
duke@435 716 HeapWord* const search_start = chunk_addr + partial_obj_size;
duke@435 717
duke@435 718 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
duke@435 719 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
duke@435 720
duke@435 721 result += partial_obj_size + live_to_left;
duke@435 722 assert(result <= addr, "object cannot move to the right");
duke@435 723 return result;
duke@435 724 }
duke@435 725
duke@435 726 HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
duke@435 727 assert(addr != NULL, "Should detect NULL oop earlier");
duke@435 728 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
duke@435 729 #ifdef ASSERT
duke@435 730 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
duke@435 731 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
duke@435 732 }
duke@435 733 #endif
duke@435 734 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
duke@435 735
duke@435 736 // Chunk covering the object.
duke@435 737 size_t chunk_index = addr_to_chunk_idx(addr);
duke@435 738 const ChunkData* const chunk_ptr = chunk(chunk_index);
duke@435 739 HeapWord* const chunk_addr = chunk_align_down(addr);
duke@435 740
duke@435 741 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
duke@435 742 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
duke@435 743
duke@435 744 HeapWord* result = chunk_ptr->destination();
duke@435 745
duke@435 746 // If all the data in the chunk is live, then the new location of the object
duke@435 747 // can be calculated from the destination of the chunk plus the offset of the
duke@435 748 // object in the chunk.
duke@435 749 if (chunk_ptr->data_size() == ChunkSize) {
duke@435 750 result += pointer_delta(addr, chunk_addr);
duke@435 751 return result;
duke@435 752 }
duke@435 753
duke@435 754 // The new location of the object is
duke@435 755 // chunk destination +
duke@435 756 // block offset +
duke@435 757 // sizes of the live objects in the Block that are to the left of addr
duke@435 758 const size_t block_offset = addr_to_block_ptr(addr)->offset();
duke@435 759 HeapWord* const search_start = chunk_addr + block_offset;
duke@435 760
duke@435 761 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
duke@435 762 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
duke@435 763
duke@435 764 result += block_offset + live_to_left;
duke@435 765 assert(result <= addr, "object cannot move to the right");
duke@435 766 assert(result == chunk_calc_new_pointer(addr), "Should match");
duke@435 767 return result;
duke@435 768 }
duke@435 769
duke@435 770 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
duke@435 771 klassOop updated_klass;
duke@435 772 if (PSParallelCompact::should_update_klass(old_klass)) {
duke@435 773 updated_klass = (klassOop) calc_new_pointer(old_klass);
duke@435 774 } else {
duke@435 775 updated_klass = old_klass;
duke@435 776 }
duke@435 777
duke@435 778 return updated_klass;
duke@435 779 }
duke@435 780
duke@435 781 #ifdef ASSERT
duke@435 782 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
duke@435 783 {
duke@435 784 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
duke@435 785 const size_t* const end = (const size_t*)vspace->committed_high_addr();
duke@435 786 for (const size_t* p = beg; p < end; ++p) {
duke@435 787 assert(*p == 0, "not zero");
duke@435 788 }
duke@435 789 }
duke@435 790
duke@435 791 void ParallelCompactData::verify_clear()
duke@435 792 {
duke@435 793 verify_clear(_chunk_vspace);
duke@435 794 verify_clear(_block_vspace);
duke@435 795 }
duke@435 796 #endif // #ifdef ASSERT
duke@435 797
duke@435 798 #ifdef NOT_PRODUCT
duke@435 799 ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
duke@435 800 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 801 return sd.chunk(chunk_index);
duke@435 802 }
duke@435 803 #endif
duke@435 804
duke@435 805 elapsedTimer PSParallelCompact::_accumulated_time;
duke@435 806 unsigned int PSParallelCompact::_total_invocations = 0;
duke@435 807 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
duke@435 808 jlong PSParallelCompact::_time_of_last_gc = 0;
duke@435 809 CollectorCounters* PSParallelCompact::_counters = NULL;
duke@435 810 ParMarkBitMap PSParallelCompact::_mark_bitmap;
duke@435 811 ParallelCompactData PSParallelCompact::_summary_data;
duke@435 812
duke@435 813 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
coleenp@548 814
coleenp@548 815 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
coleenp@548 816 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
coleenp@548 817
coleenp@548 818 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 819 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 820
duke@435 821 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
duke@435 822 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
duke@435 823
coleenp@548 824 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
coleenp@548 825 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
coleenp@548 826
coleenp@548 827 void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
coleenp@548 828
coleenp@548 829 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
coleenp@548 830 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
duke@435 831
duke@435 832 void PSParallelCompact::post_initialize() {
duke@435 833 ParallelScavengeHeap* heap = gc_heap();
duke@435 834 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 835
duke@435 836 MemRegion mr = heap->reserved_region();
duke@435 837 _ref_processor = ReferenceProcessor::create_ref_processor(
duke@435 838 mr, // span
duke@435 839 true, // atomic_discovery
duke@435 840 true, // mt_discovery
duke@435 841 &_is_alive_closure,
duke@435 842 ParallelGCThreads,
duke@435 843 ParallelRefProcEnabled);
duke@435 844 _counters = new CollectorCounters("PSParallelCompact", 1);
duke@435 845
duke@435 846 // Initialize static fields in ParCompactionManager.
duke@435 847 ParCompactionManager::initialize(mark_bitmap());
duke@435 848 }
duke@435 849
duke@435 850 bool PSParallelCompact::initialize() {
duke@435 851 ParallelScavengeHeap* heap = gc_heap();
duke@435 852 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 853 MemRegion mr = heap->reserved_region();
duke@435 854
duke@435 855 // Was the old gen get allocated successfully?
duke@435 856 if (!heap->old_gen()->is_allocated()) {
duke@435 857 return false;
duke@435 858 }
duke@435 859
duke@435 860 initialize_space_info();
duke@435 861 initialize_dead_wood_limiter();
duke@435 862
duke@435 863 if (!_mark_bitmap.initialize(mr)) {
duke@435 864 vm_shutdown_during_initialization("Unable to allocate bit map for "
duke@435 865 "parallel garbage collection for the requested heap size.");
duke@435 866 return false;
duke@435 867 }
duke@435 868
duke@435 869 if (!_summary_data.initialize(mr)) {
duke@435 870 vm_shutdown_during_initialization("Unable to allocate tables for "
duke@435 871 "parallel garbage collection for the requested heap size.");
duke@435 872 return false;
duke@435 873 }
duke@435 874
duke@435 875 return true;
duke@435 876 }
duke@435 877
duke@435 878 void PSParallelCompact::initialize_space_info()
duke@435 879 {
duke@435 880 memset(&_space_info, 0, sizeof(_space_info));
duke@435 881
duke@435 882 ParallelScavengeHeap* heap = gc_heap();
duke@435 883 PSYoungGen* young_gen = heap->young_gen();
duke@435 884 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 885
duke@435 886 _space_info[perm_space_id].set_space(perm_space);
duke@435 887 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
duke@435 888 _space_info[eden_space_id].set_space(young_gen->eden_space());
duke@435 889 _space_info[from_space_id].set_space(young_gen->from_space());
duke@435 890 _space_info[to_space_id].set_space(young_gen->to_space());
duke@435 891
duke@435 892 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
duke@435 893 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
duke@435 894
duke@435 895 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
duke@435 896 if (TraceParallelOldGCDensePrefix) {
duke@435 897 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
duke@435 898 _space_info[perm_space_id].min_dense_prefix());
duke@435 899 }
duke@435 900 }
duke@435 901
duke@435 902 void PSParallelCompact::initialize_dead_wood_limiter()
duke@435 903 {
duke@435 904 const size_t max = 100;
duke@435 905 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
duke@435 906 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
duke@435 907 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
duke@435 908 DEBUG_ONLY(_dwl_initialized = true;)
duke@435 909 _dwl_adjustment = normal_distribution(1.0);
duke@435 910 }
duke@435 911
duke@435 912 // Simple class for storing info about the heap at the start of GC, to be used
duke@435 913 // after GC for comparison/printing.
duke@435 914 class PreGCValues {
duke@435 915 public:
duke@435 916 PreGCValues() { }
duke@435 917 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
duke@435 918
duke@435 919 void fill(ParallelScavengeHeap* heap) {
duke@435 920 _heap_used = heap->used();
duke@435 921 _young_gen_used = heap->young_gen()->used_in_bytes();
duke@435 922 _old_gen_used = heap->old_gen()->used_in_bytes();
duke@435 923 _perm_gen_used = heap->perm_gen()->used_in_bytes();
duke@435 924 };
duke@435 925
duke@435 926 size_t heap_used() const { return _heap_used; }
duke@435 927 size_t young_gen_used() const { return _young_gen_used; }
duke@435 928 size_t old_gen_used() const { return _old_gen_used; }
duke@435 929 size_t perm_gen_used() const { return _perm_gen_used; }
duke@435 930
duke@435 931 private:
duke@435 932 size_t _heap_used;
duke@435 933 size_t _young_gen_used;
duke@435 934 size_t _old_gen_used;
duke@435 935 size_t _perm_gen_used;
duke@435 936 };
duke@435 937
duke@435 938 void
duke@435 939 PSParallelCompact::clear_data_covering_space(SpaceId id)
duke@435 940 {
duke@435 941 // At this point, top is the value before GC, new_top() is the value that will
duke@435 942 // be set at the end of GC. The marking bitmap is cleared to top; nothing
duke@435 943 // should be marked above top. The summary data is cleared to the larger of
duke@435 944 // top & new_top.
duke@435 945 MutableSpace* const space = _space_info[id].space();
duke@435 946 HeapWord* const bot = space->bottom();
duke@435 947 HeapWord* const top = space->top();
duke@435 948 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
duke@435 949
duke@435 950 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
duke@435 951 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
duke@435 952 _mark_bitmap.clear_range(beg_bit, end_bit);
duke@435 953
duke@435 954 const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
duke@435 955 const size_t end_chunk =
duke@435 956 _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
duke@435 957 _summary_data.clear_range(beg_chunk, end_chunk);
duke@435 958 }
duke@435 959
duke@435 960 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
duke@435 961 {
duke@435 962 // Update the from & to space pointers in space_info, since they are swapped
duke@435 963 // at each young gen gc. Do the update unconditionally (even though a
duke@435 964 // promotion failure does not swap spaces) because an unknown number of minor
duke@435 965 // collections will have swapped the spaces an unknown number of times.
duke@435 966 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
duke@435 967 ParallelScavengeHeap* heap = gc_heap();
duke@435 968 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
duke@435 969 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
duke@435 970
duke@435 971 pre_gc_values->fill(heap);
duke@435 972
duke@435 973 ParCompactionManager::reset();
duke@435 974 NOT_PRODUCT(_mark_bitmap.reset_counters());
duke@435 975 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
duke@435 976 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
duke@435 977
duke@435 978 // Increment the invocation count
apetrusenko@574 979 heap->increment_total_collections(true);
duke@435 980
duke@435 981 // We need to track unique mark sweep invocations as well.
duke@435 982 _total_invocations++;
duke@435 983
duke@435 984 if (PrintHeapAtGC) {
duke@435 985 Universe::print_heap_before_gc();
duke@435 986 }
duke@435 987
duke@435 988 // Fill in TLABs
duke@435 989 heap->accumulate_statistics_all_tlabs();
duke@435 990 heap->ensure_parsability(true); // retire TLABs
duke@435 991
duke@435 992 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 993 HandleMark hm; // Discard invalid handles created during verification
duke@435 994 gclog_or_tty->print(" VerifyBeforeGC:");
duke@435 995 Universe::verify(true);
duke@435 996 }
duke@435 997
duke@435 998 // Verify object start arrays
duke@435 999 if (VerifyObjectStartArray &&
duke@435 1000 VerifyBeforeGC) {
duke@435 1001 heap->old_gen()->verify_object_start_array();
duke@435 1002 heap->perm_gen()->verify_object_start_array();
duke@435 1003 }
duke@435 1004
duke@435 1005 DEBUG_ONLY(mark_bitmap()->verify_clear();)
duke@435 1006 DEBUG_ONLY(summary_data().verify_clear();)
jcoomes@645 1007
jcoomes@645 1008 // Have worker threads release resources the next time they run a task.
jcoomes@645 1009 gc_task_manager()->release_all_resources();
duke@435 1010 }
duke@435 1011
duke@435 1012 void PSParallelCompact::post_compact()
duke@435 1013 {
duke@435 1014 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
duke@435 1015
duke@435 1016 // Clear the marking bitmap and summary data and update top() in each space.
duke@435 1017 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 1018 clear_data_covering_space(SpaceId(id));
duke@435 1019 _space_info[id].space()->set_top(_space_info[id].new_top());
duke@435 1020 }
duke@435 1021
duke@435 1022 MutableSpace* const eden_space = _space_info[eden_space_id].space();
duke@435 1023 MutableSpace* const from_space = _space_info[from_space_id].space();
duke@435 1024 MutableSpace* const to_space = _space_info[to_space_id].space();
duke@435 1025
duke@435 1026 ParallelScavengeHeap* heap = gc_heap();
duke@435 1027 bool eden_empty = eden_space->is_empty();
duke@435 1028 if (!eden_empty) {
duke@435 1029 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
duke@435 1030 heap->young_gen(), heap->old_gen());
duke@435 1031 }
duke@435 1032
duke@435 1033 // Update heap occupancy information which is used as input to the soft ref
duke@435 1034 // clearing policy at the next gc.
duke@435 1035 Universe::update_heap_info_at_gc();
duke@435 1036
duke@435 1037 bool young_gen_empty = eden_empty && from_space->is_empty() &&
duke@435 1038 to_space->is_empty();
duke@435 1039
duke@435 1040 BarrierSet* bs = heap->barrier_set();
duke@435 1041 if (bs->is_a(BarrierSet::ModRef)) {
duke@435 1042 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
duke@435 1043 MemRegion old_mr = heap->old_gen()->reserved();
duke@435 1044 MemRegion perm_mr = heap->perm_gen()->reserved();
duke@435 1045 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
duke@435 1046
duke@435 1047 if (young_gen_empty) {
duke@435 1048 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 1049 } else {
duke@435 1050 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 1051 }
duke@435 1052 }
duke@435 1053
duke@435 1054 Threads::gc_epilogue();
duke@435 1055 CodeCache::gc_epilogue();
duke@435 1056
duke@435 1057 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@435 1058
duke@435 1059 ref_processor()->enqueue_discovered_references(NULL);
duke@435 1060
jmasa@698 1061 if (ZapUnusedHeapArea) {
jmasa@698 1062 heap->gen_mangle_unused_area();
jmasa@698 1063 }
jmasa@698 1064
duke@435 1065 // Update time of last GC
duke@435 1066 reset_millis_since_last_gc();
duke@435 1067 }
duke@435 1068
duke@435 1069 HeapWord*
duke@435 1070 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
duke@435 1071 bool maximum_compaction)
duke@435 1072 {
duke@435 1073 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1074 const ParallelCompactData& sd = summary_data();
duke@435 1075
duke@435 1076 const MutableSpace* const space = _space_info[id].space();
duke@435 1077 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
duke@435 1078 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
duke@435 1079 const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 1080
duke@435 1081 // Skip full chunks at the beginning of the space--they are necessarily part
duke@435 1082 // of the dense prefix.
duke@435 1083 size_t full_count = 0;
duke@435 1084 const ChunkData* cp;
duke@435 1085 for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
duke@435 1086 ++full_count;
duke@435 1087 }
duke@435 1088
duke@435 1089 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 1090 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 1091 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
duke@435 1092 if (maximum_compaction || cp == end_cp || interval_ended) {
duke@435 1093 _maximum_compaction_gc_num = total_invocations();
duke@435 1094 return sd.chunk_to_addr(cp);
duke@435 1095 }
duke@435 1096
duke@435 1097 HeapWord* const new_top = _space_info[id].new_top();
duke@435 1098 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 1099 const size_t space_used = space->used_in_words();
duke@435 1100 const size_t space_capacity = space->capacity_in_words();
duke@435 1101
duke@435 1102 const double cur_density = double(space_live) / space_capacity;
duke@435 1103 const double deadwood_density =
duke@435 1104 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
duke@435 1105 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
duke@435 1106
duke@435 1107 if (TraceParallelOldGCDensePrefix) {
duke@435 1108 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
duke@435 1109 cur_density, deadwood_density, deadwood_goal);
duke@435 1110 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 1111 "space_cap=" SIZE_FORMAT,
duke@435 1112 space_live, space_used,
duke@435 1113 space_capacity);
duke@435 1114 }
duke@435 1115
duke@435 1116 // XXX - Use binary search?
duke@435 1117 HeapWord* dense_prefix = sd.chunk_to_addr(cp);
duke@435 1118 const ChunkData* full_cp = cp;
duke@435 1119 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
duke@435 1120 while (cp < end_cp) {
duke@435 1121 HeapWord* chunk_destination = cp->destination();
duke@435 1122 const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
duke@435 1123 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 1124 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
jcoomes@699 1125 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
duke@435 1126 sd.chunk(cp), chunk_destination,
duke@435 1127 dense_prefix, cur_deadwood);
duke@435 1128 }
duke@435 1129
duke@435 1130 if (cur_deadwood >= deadwood_goal) {
duke@435 1131 // Found the chunk that has the correct amount of deadwood to the left.
duke@435 1132 // This typically occurs after crossing a fairly sparse set of chunks, so
duke@435 1133 // iterate backwards over those sparse chunks, looking for the chunk that
duke@435 1134 // has the lowest density of live objects 'to the right.'
duke@435 1135 size_t space_to_left = sd.chunk(cp) * chunk_size;
duke@435 1136 size_t live_to_left = space_to_left - cur_deadwood;
duke@435 1137 size_t space_to_right = space_capacity - space_to_left;
duke@435 1138 size_t live_to_right = space_live - live_to_left;
duke@435 1139 double density_to_right = double(live_to_right) / space_to_right;
duke@435 1140 while (cp > full_cp) {
duke@435 1141 --cp;
duke@435 1142 const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
duke@435 1143 const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
duke@435 1144 double prev_chunk_density_to_right =
duke@435 1145 double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
duke@435 1146 if (density_to_right <= prev_chunk_density_to_right) {
duke@435 1147 return dense_prefix;
duke@435 1148 }
duke@435 1149 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 1150 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
duke@435 1151 "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
duke@435 1152 prev_chunk_density_to_right);
duke@435 1153 }
duke@435 1154 dense_prefix -= chunk_size;
duke@435 1155 live_to_right = prev_chunk_live_to_right;
duke@435 1156 space_to_right = prev_chunk_space_to_right;
duke@435 1157 density_to_right = prev_chunk_density_to_right;
duke@435 1158 }
duke@435 1159 return dense_prefix;
duke@435 1160 }
duke@435 1161
duke@435 1162 dense_prefix += chunk_size;
duke@435 1163 ++cp;
duke@435 1164 }
duke@435 1165
duke@435 1166 return dense_prefix;
duke@435 1167 }
duke@435 1168
duke@435 1169 #ifndef PRODUCT
duke@435 1170 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
duke@435 1171 const SpaceId id,
duke@435 1172 const bool maximum_compaction,
duke@435 1173 HeapWord* const addr)
duke@435 1174 {
duke@435 1175 const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
duke@435 1176 ChunkData* const cp = summary_data().chunk(chunk_idx);
duke@435 1177 const MutableSpace* const space = _space_info[id].space();
duke@435 1178 HeapWord* const new_top = _space_info[id].new_top();
duke@435 1179
duke@435 1180 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 1181 const size_t dead_to_left = pointer_delta(addr, cp->destination());
duke@435 1182 const size_t space_cap = space->capacity_in_words();
duke@435 1183 const double dead_to_left_pct = double(dead_to_left) / space_cap;
duke@435 1184 const size_t live_to_right = new_top - cp->destination();
duke@435 1185 const size_t dead_to_right = space->top() - addr - live_to_right;
duke@435 1186
jcoomes@699 1187 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
duke@435 1188 "spl=" SIZE_FORMAT " "
duke@435 1189 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
duke@435 1190 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
duke@435 1191 " ratio=%10.8f",
duke@435 1192 algorithm, addr, chunk_idx,
duke@435 1193 space_live,
duke@435 1194 dead_to_left, dead_to_left_pct,
duke@435 1195 dead_to_right, live_to_right,
duke@435 1196 double(dead_to_right) / live_to_right);
duke@435 1197 }
duke@435 1198 #endif // #ifndef PRODUCT
duke@435 1199
duke@435 1200 // Return a fraction indicating how much of the generation can be treated as
duke@435 1201 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
duke@435 1202 // based on the density of live objects in the generation to determine a limit,
duke@435 1203 // which is then adjusted so the return value is min_percent when the density is
duke@435 1204 // 1.
duke@435 1205 //
duke@435 1206 // The following table shows some return values for a different values of the
duke@435 1207 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
duke@435 1208 // min_percent is 1.
duke@435 1209 //
duke@435 1210 // fraction allowed as dead wood
duke@435 1211 // -----------------------------------------------------------------
duke@435 1212 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
duke@435 1213 // ------- ---------- ---------- ---------- ---------- ---------- ----------
duke@435 1214 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1215 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1216 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1217 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1218 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1219 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1220 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1221 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1222 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1223 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1224 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
duke@435 1225 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1226 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1227 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1228 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1229 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1230 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1231 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1232 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1233 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1234 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1235
duke@435 1236 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
duke@435 1237 {
duke@435 1238 assert(_dwl_initialized, "uninitialized");
duke@435 1239
duke@435 1240 // The raw limit is the value of the normal distribution at x = density.
duke@435 1241 const double raw_limit = normal_distribution(density);
duke@435 1242
duke@435 1243 // Adjust the raw limit so it becomes the minimum when the density is 1.
duke@435 1244 //
duke@435 1245 // First subtract the adjustment value (which is simply the precomputed value
duke@435 1246 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
duke@435 1247 // Then add the minimum value, so the minimum is returned when the density is
duke@435 1248 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
duke@435 1249 const double min = double(min_percent) / 100.0;
duke@435 1250 const double limit = raw_limit - _dwl_adjustment + min;
duke@435 1251 return MAX2(limit, 0.0);
duke@435 1252 }
duke@435 1253
duke@435 1254 ParallelCompactData::ChunkData*
duke@435 1255 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
duke@435 1256 const ChunkData* end)
duke@435 1257 {
duke@435 1258 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1259 ParallelCompactData& sd = summary_data();
duke@435 1260 size_t left = sd.chunk(beg);
duke@435 1261 size_t right = end > beg ? sd.chunk(end) - 1 : left;
duke@435 1262
duke@435 1263 // Binary search.
duke@435 1264 while (left < right) {
duke@435 1265 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1266 const size_t middle = left + (right - left) / 2;
duke@435 1267 ChunkData* const middle_ptr = sd.chunk(middle);
duke@435 1268 HeapWord* const dest = middle_ptr->destination();
duke@435 1269 HeapWord* const addr = sd.chunk_to_addr(middle);
duke@435 1270 assert(dest != NULL, "sanity");
duke@435 1271 assert(dest <= addr, "must move left");
duke@435 1272
duke@435 1273 if (middle > left && dest < addr) {
duke@435 1274 right = middle - 1;
duke@435 1275 } else if (middle < right && middle_ptr->data_size() == chunk_size) {
duke@435 1276 left = middle + 1;
duke@435 1277 } else {
duke@435 1278 return middle_ptr;
duke@435 1279 }
duke@435 1280 }
duke@435 1281 return sd.chunk(left);
duke@435 1282 }
duke@435 1283
duke@435 1284 ParallelCompactData::ChunkData*
duke@435 1285 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
duke@435 1286 const ChunkData* end,
duke@435 1287 size_t dead_words)
duke@435 1288 {
duke@435 1289 ParallelCompactData& sd = summary_data();
duke@435 1290 size_t left = sd.chunk(beg);
duke@435 1291 size_t right = end > beg ? sd.chunk(end) - 1 : left;
duke@435 1292
duke@435 1293 // Binary search.
duke@435 1294 while (left < right) {
duke@435 1295 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1296 const size_t middle = left + (right - left) / 2;
duke@435 1297 ChunkData* const middle_ptr = sd.chunk(middle);
duke@435 1298 HeapWord* const dest = middle_ptr->destination();
duke@435 1299 HeapWord* const addr = sd.chunk_to_addr(middle);
duke@435 1300 assert(dest != NULL, "sanity");
duke@435 1301 assert(dest <= addr, "must move left");
duke@435 1302
duke@435 1303 const size_t dead_to_left = pointer_delta(addr, dest);
duke@435 1304 if (middle > left && dead_to_left > dead_words) {
duke@435 1305 right = middle - 1;
duke@435 1306 } else if (middle < right && dead_to_left < dead_words) {
duke@435 1307 left = middle + 1;
duke@435 1308 } else {
duke@435 1309 return middle_ptr;
duke@435 1310 }
duke@435 1311 }
duke@435 1312 return sd.chunk(left);
duke@435 1313 }
duke@435 1314
duke@435 1315 // The result is valid during the summary phase, after the initial summarization
duke@435 1316 // of each space into itself, and before final summarization.
duke@435 1317 inline double
duke@435 1318 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
duke@435 1319 HeapWord* const bottom,
duke@435 1320 HeapWord* const top,
duke@435 1321 HeapWord* const new_top)
duke@435 1322 {
duke@435 1323 ParallelCompactData& sd = summary_data();
duke@435 1324
duke@435 1325 assert(cp != NULL, "sanity");
duke@435 1326 assert(bottom != NULL, "sanity");
duke@435 1327 assert(top != NULL, "sanity");
duke@435 1328 assert(new_top != NULL, "sanity");
duke@435 1329 assert(top >= new_top, "summary data problem?");
duke@435 1330 assert(new_top > bottom, "space is empty; should not be here");
duke@435 1331 assert(new_top >= cp->destination(), "sanity");
duke@435 1332 assert(top >= sd.chunk_to_addr(cp), "sanity");
duke@435 1333
duke@435 1334 HeapWord* const destination = cp->destination();
duke@435 1335 const size_t dense_prefix_live = pointer_delta(destination, bottom);
duke@435 1336 const size_t compacted_region_live = pointer_delta(new_top, destination);
duke@435 1337 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
duke@435 1338 const size_t reclaimable = compacted_region_used - compacted_region_live;
duke@435 1339
duke@435 1340 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
duke@435 1341 return double(reclaimable) / divisor;
duke@435 1342 }
duke@435 1343
duke@435 1344 // Return the address of the end of the dense prefix, a.k.a. the start of the
duke@435 1345 // compacted region. The address is always on a chunk boundary.
duke@435 1346 //
duke@435 1347 // Completely full chunks at the left are skipped, since no compaction can occur
duke@435 1348 // in those chunks. Then the maximum amount of dead wood to allow is computed,
duke@435 1349 // based on the density (amount live / capacity) of the generation; the chunk
duke@435 1350 // with approximately that amount of dead space to the left is identified as the
duke@435 1351 // limit chunk. Chunks between the last completely full chunk and the limit
duke@435 1352 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio()
duke@435 1353 // is selected.
duke@435 1354 HeapWord*
duke@435 1355 PSParallelCompact::compute_dense_prefix(const SpaceId id,
duke@435 1356 bool maximum_compaction)
duke@435 1357 {
duke@435 1358 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1359 const ParallelCompactData& sd = summary_data();
duke@435 1360
duke@435 1361 const MutableSpace* const space = _space_info[id].space();
duke@435 1362 HeapWord* const top = space->top();
duke@435 1363 HeapWord* const top_aligned_up = sd.chunk_align_up(top);
duke@435 1364 HeapWord* const new_top = _space_info[id].new_top();
duke@435 1365 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
duke@435 1366 HeapWord* const bottom = space->bottom();
duke@435 1367 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
duke@435 1368 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 1369 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);
duke@435 1370
duke@435 1371 // Skip full chunks at the beginning of the space--they are necessarily part
duke@435 1372 // of the dense prefix.
duke@435 1373 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
duke@435 1374 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
duke@435 1375 space->is_empty(), "no dead space allowed to the left");
duke@435 1376 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
duke@435 1377 "chunk must have dead space");
duke@435 1378
duke@435 1379 // The gc number is saved whenever a maximum compaction is done, and used to
duke@435 1380 // determine when the maximum compaction interval has expired. This avoids
duke@435 1381 // successive max compactions for different reasons.
duke@435 1382 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 1383 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 1384 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
duke@435 1385 total_invocations() == HeapFirstMaximumCompactionCount;
duke@435 1386 if (maximum_compaction || full_cp == top_cp || interval_ended) {
duke@435 1387 _maximum_compaction_gc_num = total_invocations();
duke@435 1388 return sd.chunk_to_addr(full_cp);
duke@435 1389 }
duke@435 1390
duke@435 1391 const size_t space_live = pointer_delta(new_top, bottom);
duke@435 1392 const size_t space_used = space->used_in_words();
duke@435 1393 const size_t space_capacity = space->capacity_in_words();
duke@435 1394
duke@435 1395 const double density = double(space_live) / double(space_capacity);
duke@435 1396 const size_t min_percent_free =
duke@435 1397 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
duke@435 1398 const double limiter = dead_wood_limiter(density, min_percent_free);
duke@435 1399 const size_t dead_wood_max = space_used - space_live;
duke@435 1400 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
duke@435 1401 dead_wood_max);
duke@435 1402
duke@435 1403 if (TraceParallelOldGCDensePrefix) {
duke@435 1404 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 1405 "space_cap=" SIZE_FORMAT,
duke@435 1406 space_live, space_used,
duke@435 1407 space_capacity);
duke@435 1408 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
duke@435 1409 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
duke@435 1410 density, min_percent_free, limiter,
duke@435 1411 dead_wood_max, dead_wood_limit);
duke@435 1412 }
duke@435 1413
duke@435 1414 // Locate the chunk with the desired amount of dead space to the left.
duke@435 1415 const ChunkData* const limit_cp =
duke@435 1416 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);
duke@435 1417
duke@435 1418 // Scan from the first chunk with dead space to the limit chunk and find the
duke@435 1419 // one with the best (largest) reclaimed ratio.
duke@435 1420 double best_ratio = 0.0;
duke@435 1421 const ChunkData* best_cp = full_cp;
duke@435 1422 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
duke@435 1423 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
duke@435 1424 if (tmp_ratio > best_ratio) {
duke@435 1425 best_cp = cp;
duke@435 1426 best_ratio = tmp_ratio;
duke@435 1427 }
duke@435 1428 }
duke@435 1429
duke@435 1430 #if 0
duke@435 1431 // Something to consider: if the chunk with the best ratio is 'close to' the
duke@435 1432 // first chunk w/free space, choose the first chunk with free space
duke@435 1433 // ("first-free"). The first-free chunk is usually near the start of the
duke@435 1434 // heap, which means we are copying most of the heap already, so copy a bit
duke@435 1435 // more to get complete compaction.
duke@435 1436 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
duke@435 1437 _maximum_compaction_gc_num = total_invocations();
duke@435 1438 best_cp = full_cp;
duke@435 1439 }
duke@435 1440 #endif // #if 0
duke@435 1441
duke@435 1442 return sd.chunk_to_addr(best_cp);
duke@435 1443 }
duke@435 1444
duke@435 1445 void PSParallelCompact::summarize_spaces_quick()
duke@435 1446 {
duke@435 1447 for (unsigned int i = 0; i < last_space_id; ++i) {
duke@435 1448 const MutableSpace* space = _space_info[i].space();
duke@435 1449 bool result = _summary_data.summarize(space->bottom(), space->end(),
duke@435 1450 space->bottom(), space->top(),
duke@435 1451 _space_info[i].new_top_addr());
duke@435 1452 assert(result, "should never fail");
duke@435 1453 _space_info[i].set_dense_prefix(space->bottom());
duke@435 1454 }
duke@435 1455 }
duke@435 1456
duke@435 1457 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
duke@435 1458 {
duke@435 1459 HeapWord* const dense_prefix_end = dense_prefix(id);
duke@435 1460 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
duke@435 1461 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
duke@435 1462 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
duke@435 1463 // Only enough dead space is filled so that any remaining dead space to the
duke@435 1464 // left is larger than the minimum filler object. (The remainder is filled
duke@435 1465 // during the copy/update phase.)
duke@435 1466 //
duke@435 1467 // The size of the dead space to the right of the boundary is not a
duke@435 1468 // concern, since compaction will be able to use whatever space is
duke@435 1469 // available.
duke@435 1470 //
duke@435 1471 // Here '||' is the boundary, 'x' represents a don't care bit and a box
duke@435 1472 // surrounds the space to be filled with an object.
duke@435 1473 //
duke@435 1474 // In the 32-bit VM, each bit represents two 32-bit words:
duke@435 1475 // +---+
duke@435 1476 // a) beg_bits: ... x x x | 0 | || 0 x x ...
duke@435 1477 // end_bits: ... x x x | 0 | || 0 x x ...
duke@435 1478 // +---+
duke@435 1479 //
duke@435 1480 // In the 64-bit VM, each bit represents one 64-bit word:
duke@435 1481 // +------------+
duke@435 1482 // b) beg_bits: ... x x x | 0 || 0 | x x ...
duke@435 1483 // end_bits: ... x x 1 | 0 || 0 | x x ...
duke@435 1484 // +------------+
duke@435 1485 // +-------+
duke@435 1486 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
duke@435 1487 // end_bits: ... x 1 | 0 0 | || 0 x x ...
duke@435 1488 // +-------+
duke@435 1489 // +-----------+
duke@435 1490 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
duke@435 1491 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
duke@435 1492 // +-----------+
duke@435 1493 // +-------+
duke@435 1494 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1495 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1496 // +-------+
duke@435 1497
duke@435 1498 // Initially assume case a, c or e will apply.
duke@435 1499 size_t obj_len = (size_t)oopDesc::header_size();
duke@435 1500 HeapWord* obj_beg = dense_prefix_end - obj_len;
duke@435 1501
duke@435 1502 #ifdef _LP64
duke@435 1503 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
duke@435 1504 // Case b above.
duke@435 1505 obj_beg = dense_prefix_end - 1;
duke@435 1506 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
duke@435 1507 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
duke@435 1508 // Case d above.
duke@435 1509 obj_beg = dense_prefix_end - 3;
duke@435 1510 obj_len = 3;
duke@435 1511 }
duke@435 1512 #endif // #ifdef _LP64
duke@435 1513
duke@435 1514 MemRegion region(obj_beg, obj_len);
duke@435 1515 SharedHeap::fill_region_with_object(region);
duke@435 1516 _mark_bitmap.mark_obj(obj_beg, obj_len);
duke@435 1517 _summary_data.add_obj(obj_beg, obj_len);
duke@435 1518 assert(start_array(id) != NULL, "sanity");
duke@435 1519 start_array(id)->allocate_block(obj_beg);
duke@435 1520 }
duke@435 1521 }
duke@435 1522
duke@435 1523 void
duke@435 1524 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
duke@435 1525 {
duke@435 1526 assert(id < last_space_id, "id out of range");
jcoomes@700 1527 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
jcoomes@700 1528 "should have been set in summarize_spaces_quick()");
duke@435 1529
duke@435 1530 const MutableSpace* space = _space_info[id].space();
jcoomes@700 1531 if (_space_info[id].new_top() != space->bottom()) {
jcoomes@700 1532 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
jcoomes@700 1533 _space_info[id].set_dense_prefix(dense_prefix_end);
duke@435 1534
duke@435 1535 #ifndef PRODUCT
jcoomes@700 1536 if (TraceParallelOldGCDensePrefix) {
jcoomes@700 1537 print_dense_prefix_stats("ratio", id, maximum_compaction,
jcoomes@700 1538 dense_prefix_end);
jcoomes@700 1539 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
jcoomes@700 1540 print_dense_prefix_stats("density", id, maximum_compaction, addr);
jcoomes@700 1541 }
jcoomes@700 1542 #endif // #ifndef PRODUCT
jcoomes@700 1543
jcoomes@700 1544 // If dead space crosses the dense prefix boundary, it is (at least
jcoomes@700 1545 // partially) filled with a dummy object, marked live and added to the
jcoomes@700 1546 // summary data. This simplifies the copy/update phase and must be done
jcoomes@700 1547 // before the final locations of objects are determined, to prevent leaving
jcoomes@700 1548 // a fragment of dead space that is too small to fill with an object.
jcoomes@700 1549 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
jcoomes@700 1550 fill_dense_prefix_end(id);
jcoomes@700 1551 }
jcoomes@700 1552
jcoomes@700 1553 // Compute the destination of each Chunk, and thus each object.
jcoomes@700 1554 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
jcoomes@700 1555 _summary_data.summarize(dense_prefix_end, space->end(),
jcoomes@700 1556 dense_prefix_end, space->top(),
jcoomes@700 1557 _space_info[id].new_top_addr());
duke@435 1558 }
duke@435 1559
duke@435 1560 if (TraceParallelOldGCSummaryPhase) {
duke@435 1561 const size_t chunk_size = ParallelCompactData::ChunkSize;
jcoomes@700 1562 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
duke@435 1563 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
duke@435 1564 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
jcoomes@700 1565 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@700 1566 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top);
duke@435 1567 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
duke@435 1568 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
duke@435 1569 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
duke@435 1570 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
duke@435 1571 id, space->capacity_in_words(), dense_prefix_end,
duke@435 1572 dp_chunk, dp_words / chunk_size,
jcoomes@700 1573 cr_words / chunk_size, new_top);
duke@435 1574 }
duke@435 1575 }
duke@435 1576
duke@435 1577 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
duke@435 1578 bool maximum_compaction)
duke@435 1579 {
duke@435 1580 EventMark m("2 summarize");
duke@435 1581 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
duke@435 1582 // trace("2");
duke@435 1583
duke@435 1584 #ifdef ASSERT
duke@435 1585 if (VerifyParallelOldWithMarkSweep &&
duke@435 1586 (PSParallelCompact::total_invocations() %
duke@435 1587 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 1588 verify_mark_bitmap(_mark_bitmap);
duke@435 1589 }
duke@435 1590 if (TraceParallelOldGCMarkingPhase) {
duke@435 1591 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
duke@435 1592 "add_obj_bytes=" SIZE_FORMAT,
duke@435 1593 add_obj_count, add_obj_size * HeapWordSize);
duke@435 1594 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
duke@435 1595 "mark_bitmap_bytes=" SIZE_FORMAT,
duke@435 1596 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
duke@435 1597 }
duke@435 1598 #endif // #ifdef ASSERT
duke@435 1599
duke@435 1600 // Quick summarization of each space into itself, to see how much is live.
duke@435 1601 summarize_spaces_quick();
duke@435 1602
duke@435 1603 if (TraceParallelOldGCSummaryPhase) {
duke@435 1604 tty->print_cr("summary_phase: after summarizing each space to self");
duke@435 1605 Universe::print();
duke@435 1606 NOT_PRODUCT(print_chunk_ranges());
duke@435 1607 if (Verbose) {
duke@435 1608 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
duke@435 1609 }
duke@435 1610 }
duke@435 1611
duke@435 1612 // The amount of live data that will end up in old space (assuming it fits).
duke@435 1613 size_t old_space_total_live = 0;
duke@435 1614 unsigned int id;
duke@435 1615 for (id = old_space_id; id < last_space_id; ++id) {
duke@435 1616 old_space_total_live += pointer_delta(_space_info[id].new_top(),
duke@435 1617 _space_info[id].space()->bottom());
duke@435 1618 }
duke@435 1619
duke@435 1620 const MutableSpace* old_space = _space_info[old_space_id].space();
duke@435 1621 if (old_space_total_live > old_space->capacity_in_words()) {
duke@435 1622 // XXX - should also try to expand
duke@435 1623 maximum_compaction = true;
duke@435 1624 } else if (!UseParallelOldGCDensePrefix) {
duke@435 1625 maximum_compaction = true;
duke@435 1626 }
duke@435 1627
duke@435 1628 // Permanent and Old generations.
duke@435 1629 summarize_space(perm_space_id, maximum_compaction);
duke@435 1630 summarize_space(old_space_id, maximum_compaction);
duke@435 1631
duke@435 1632 // Summarize the remaining spaces (those in the young gen) into old space. If
duke@435 1633 // the live data from a space doesn't fit, the existing summarization is left
duke@435 1634 // intact, so the data is compacted down within the space itself.
duke@435 1635 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
duke@435 1636 HeapWord* const target_space_end = old_space->end();
duke@435 1637 for (id = eden_space_id; id < last_space_id; ++id) {
duke@435 1638 const MutableSpace* space = _space_info[id].space();
duke@435 1639 const size_t live = pointer_delta(_space_info[id].new_top(),
duke@435 1640 space->bottom());
duke@435 1641 const size_t available = pointer_delta(target_space_end, *new_top_addr);
jcoomes@701 1642 if (live > 0 && live <= available) {
duke@435 1643 // All the live data will fit.
duke@435 1644 if (TraceParallelOldGCSummaryPhase) {
duke@435 1645 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
duke@435 1646 id, *new_top_addr);
duke@435 1647 }
duke@435 1648 _summary_data.summarize(*new_top_addr, target_space_end,
duke@435 1649 space->bottom(), space->top(),
duke@435 1650 new_top_addr);
duke@435 1651
duke@435 1652 // Clear the source_chunk field for each chunk in the space.
jcoomes@701 1653 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@701 1654 HeapWord* const clear_end = _summary_data.chunk_align_up(new_top);
duke@435 1655 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
jcoomes@701 1656 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end);
jcoomes@701 1657 while (beg_chunk < end_chunk) {
duke@435 1658 beg_chunk->set_source_chunk(0);
duke@435 1659 ++beg_chunk;
duke@435 1660 }
jcoomes@701 1661
jcoomes@701 1662 // Reset the new_top value for the space.
jcoomes@701 1663 _space_info[id].set_new_top(space->bottom());
duke@435 1664 }
duke@435 1665 }
duke@435 1666
duke@435 1667 // Fill in the block data after any changes to the chunks have
duke@435 1668 // been made.
duke@435 1669 #ifdef ASSERT
duke@435 1670 summarize_blocks(cm, perm_space_id);
duke@435 1671 summarize_blocks(cm, old_space_id);
duke@435 1672 #else
duke@435 1673 if (!UseParallelOldGCChunkPointerCalc) {
duke@435 1674 summarize_blocks(cm, perm_space_id);
duke@435 1675 summarize_blocks(cm, old_space_id);
duke@435 1676 }
duke@435 1677 #endif
duke@435 1678
duke@435 1679 if (TraceParallelOldGCSummaryPhase) {
duke@435 1680 tty->print_cr("summary_phase: after final summarization");
duke@435 1681 Universe::print();
duke@435 1682 NOT_PRODUCT(print_chunk_ranges());
duke@435 1683 if (Verbose) {
duke@435 1684 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
duke@435 1685 }
duke@435 1686 }
duke@435 1687 }
duke@435 1688
duke@435 1689 // Fill in the BlockData.
duke@435 1690 // Iterate over the spaces and within each space iterate over
duke@435 1691 // the chunks and fill in the BlockData for each chunk.
duke@435 1692
duke@435 1693 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
duke@435 1694 SpaceId first_compaction_space_id) {
duke@435 1695 #if 0
duke@435 1696 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
duke@435 1697 for (SpaceId cur_space_id = first_compaction_space_id;
duke@435 1698 cur_space_id != last_space_id;
duke@435 1699 cur_space_id = next_compaction_space_id(cur_space_id)) {
duke@435 1700 // Iterate over the chunks in the space
duke@435 1701 size_t start_chunk_index =
duke@435 1702 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
duke@435 1703 BitBlockUpdateClosure bbu(mark_bitmap(),
duke@435 1704 cm,
duke@435 1705 start_chunk_index);
duke@435 1706 // Iterate over blocks.
duke@435 1707 for (size_t chunk_index = start_chunk_index;
duke@435 1708 chunk_index < _summary_data.chunk_count() &&
duke@435 1709 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
duke@435 1710 chunk_index++) {
duke@435 1711
duke@435 1712 // Reset the closure for the new chunk. Note that the closure
duke@435 1713 // maintains some data that does not get reset for each chunk
duke@435 1714 // so a new instance of the closure is no appropriate.
duke@435 1715 bbu.reset_chunk(chunk_index);
duke@435 1716
duke@435 1717 // Start the iteration with the first live object. This
duke@435 1718 // may return the end of the chunk. That is acceptable since
duke@435 1719 // it will properly limit the iterations.
duke@435 1720 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
duke@435 1721 _summary_data.first_live_or_end_in_chunk(chunk_index));
duke@435 1722
duke@435 1723 // End the iteration at the end of the chunk.
duke@435 1724 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
duke@435 1725 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
duke@435 1726 ParMarkBitMap::idx_t right_offset =
duke@435 1727 mark_bitmap()->addr_to_bit(chunk_end);
duke@435 1728
duke@435 1729 // Blocks that have not objects starting in them can be
duke@435 1730 // skipped because their data will never be used.
duke@435 1731 if (left_offset < right_offset) {
duke@435 1732
duke@435 1733 // Iterate through the objects in the chunk.
duke@435 1734 ParMarkBitMap::idx_t last_offset =
duke@435 1735 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
duke@435 1736
duke@435 1737 // If last_offset is less than right_offset, then the iterations
duke@435 1738 // terminated while it was looking for an end bit. "last_offset"
duke@435 1739 // is then the offset for the last start bit. In this situation
duke@435 1740 // the "offset" field for the next block to the right (_cur_block + 1)
duke@435 1741 // will not have been update although there may be live data
duke@435 1742 // to the left of the chunk.
duke@435 1743
duke@435 1744 size_t cur_block_plus_1 = bbu.cur_block() + 1;
duke@435 1745 HeapWord* cur_block_plus_1_addr =
duke@435 1746 _summary_data.block_to_addr(bbu.cur_block()) +
duke@435 1747 ParallelCompactData::BlockSize;
duke@435 1748 HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
duke@435 1749 #if 1 // This code works. The else doesn't but should. Why does it?
duke@435 1750 // The current block (cur_block()) has already been updated.
duke@435 1751 // The last block that may need to be updated is either the
duke@435 1752 // next block (current block + 1) or the block where the
duke@435 1753 // last object starts (which can be greater than the
duke@435 1754 // next block if there were no objects found in intervening
duke@435 1755 // blocks).
duke@435 1756 size_t last_block =
duke@435 1757 MAX2(bbu.cur_block() + 1,
duke@435 1758 _summary_data.addr_to_block_idx(last_offset_addr));
duke@435 1759 #else
duke@435 1760 // The current block has already been updated. The only block
duke@435 1761 // that remains to be updated is the block where the last
duke@435 1762 // object in the chunk starts.
duke@435 1763 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
duke@435 1764 #endif
duke@435 1765 assert_bit_is_start(last_offset);
duke@435 1766 assert((last_block == _summary_data.block_count()) ||
duke@435 1767 (_summary_data.block(last_block)->raw_offset() == 0),
duke@435 1768 "Should not have been set");
duke@435 1769 // Is the last block still in the current chunk? If still
duke@435 1770 // in this chunk, update the last block (the counting that
duke@435 1771 // included the current block is meant for the offset of the last
duke@435 1772 // block). If not in this chunk, do nothing. Should not
duke@435 1773 // update a block in the next chunk.
duke@435 1774 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
duke@435 1775 last_block)) {
duke@435 1776 if (last_offset < right_offset) {
duke@435 1777 // The last object started in this chunk but ends beyond
duke@435 1778 // this chunk. Update the block for this last object.
duke@435 1779 assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
duke@435 1780 // No end bit was found. The closure takes care of
duke@435 1781 // the cases where
duke@435 1782 // an objects crosses over into the next block
duke@435 1783 // an objects starts and ends in the next block
duke@435 1784 // It does not handle the case where an object is
duke@435 1785 // the first object in a later block and extends
duke@435 1786 // past the end of the chunk (i.e., the closure
duke@435 1787 // only handles complete objects that are in the range
duke@435 1788 // it is given). That object is handed back here
duke@435 1789 // for any special consideration necessary.
duke@435 1790 //
duke@435 1791 // Is the first bit in the last block a start or end bit?
duke@435 1792 //
duke@435 1793 // If the partial object ends in the last block L,
duke@435 1794 // then the 1st bit in L may be an end bit.
duke@435 1795 //
duke@435 1796 // Else does the last object start in a block after the current
duke@435 1797 // block? A block AA will already have been updated if an
duke@435 1798 // object ends in the next block AA+1. An object found to end in
duke@435 1799 // the AA+1 is the trigger that updates AA. Objects are being
duke@435 1800 // counted in the current block for updaing a following
duke@435 1801 // block. An object may start in later block
duke@435 1802 // block but may extend beyond the last block in the chunk.
duke@435 1803 // Updates are only done when the end of an object has been
duke@435 1804 // found. If the last object (covered by block L) starts
duke@435 1805 // beyond the current block, then no object ends in L (otherwise
duke@435 1806 // L would be the current block). So the first bit in L is
duke@435 1807 // a start bit.
duke@435 1808 //
duke@435 1809 // Else the last objects start in the current block and ends
duke@435 1810 // beyond the chunk. The current block has already been
duke@435 1811 // updated and there is no later block (with an object
duke@435 1812 // starting in it) that needs to be updated.
duke@435 1813 //
duke@435 1814 if (_summary_data.partial_obj_ends_in_block(last_block)) {
duke@435 1815 _summary_data.block(last_block)->set_end_bit_offset(
duke@435 1816 bbu.live_data_left());
duke@435 1817 } else if (last_offset_addr >= cur_block_plus_1_addr) {
duke@435 1818 // The start of the object is on a later block
duke@435 1819 // (to the right of the current block and there are no
duke@435 1820 // complete live objects to the left of this last object
duke@435 1821 // within the chunk.
duke@435 1822 // The first bit in the block is for the start of the
duke@435 1823 // last object.
duke@435 1824 _summary_data.block(last_block)->set_start_bit_offset(
duke@435 1825 bbu.live_data_left());
duke@435 1826 } else {
duke@435 1827 // The start of the last object was found in
duke@435 1828 // the current chunk (which has already
duke@435 1829 // been updated).
duke@435 1830 assert(bbu.cur_block() ==
duke@435 1831 _summary_data.addr_to_block_idx(last_offset_addr),
duke@435 1832 "Should be a block already processed");
duke@435 1833 }
duke@435 1834 #ifdef ASSERT
duke@435 1835 // Is there enough block information to find this object?
duke@435 1836 // The destination of the chunk has not been set so the
duke@435 1837 // values returned by calc_new_pointer() and
duke@435 1838 // block_calc_new_pointer() will only be
duke@435 1839 // offsets. But they should agree.
duke@435 1840 HeapWord* moved_obj_with_chunks =
duke@435 1841 _summary_data.chunk_calc_new_pointer(last_offset_addr);
duke@435 1842 HeapWord* moved_obj_with_blocks =
duke@435 1843 _summary_data.calc_new_pointer(last_offset_addr);
duke@435 1844 assert(moved_obj_with_chunks == moved_obj_with_blocks,
duke@435 1845 "Block calculation is wrong");
duke@435 1846 #endif
duke@435 1847 } else if (last_block < _summary_data.block_count()) {
duke@435 1848 // Iterations ended looking for a start bit (but
duke@435 1849 // did not run off the end of the block table).
duke@435 1850 _summary_data.block(last_block)->set_start_bit_offset(
duke@435 1851 bbu.live_data_left());
duke@435 1852 }
duke@435 1853 }
duke@435 1854 #ifdef ASSERT
duke@435 1855 // Is there enough block information to find this object?
duke@435 1856 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
duke@435 1857 HeapWord* moved_obj_with_chunks =
duke@435 1858 _summary_data.calc_new_pointer(left_offset_addr);
duke@435 1859 HeapWord* moved_obj_with_blocks =
duke@435 1860 _summary_data.calc_new_pointer(left_offset_addr);
duke@435 1861 assert(moved_obj_with_chunks == moved_obj_with_blocks,
duke@435 1862 "Block calculation is wrong");
duke@435 1863 #endif
duke@435 1864
duke@435 1865 // Is there another block after the end of this chunk?
duke@435 1866 #ifdef ASSERT
duke@435 1867 if (last_block < _summary_data.block_count()) {
duke@435 1868 // No object may have been found in a block. If that
duke@435 1869 // block is at the end of the chunk, the iteration will
duke@435 1870 // terminate without incrementing the current block so
duke@435 1871 // that the current block is not the last block in the
duke@435 1872 // chunk. That situation precludes asserting that the
duke@435 1873 // current block is the last block in the chunk. Assert
duke@435 1874 // the lesser condition that the current block does not
duke@435 1875 // exceed the chunk.
duke@435 1876 assert(_summary_data.block_to_addr(last_block) <=
duke@435 1877 (_summary_data.chunk_to_addr(chunk_index) +
duke@435 1878 ParallelCompactData::ChunkSize),
duke@435 1879 "Chunk and block inconsistency");
duke@435 1880 assert(last_offset <= right_offset, "Iteration over ran end");
duke@435 1881 }
duke@435 1882 #endif
duke@435 1883 }
duke@435 1884 #ifdef ASSERT
duke@435 1885 if (PrintGCDetails && Verbose) {
duke@435 1886 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
duke@435 1887 size_t first_block =
duke@435 1888 chunk_index / ParallelCompactData::BlocksPerChunk;
duke@435 1889 gclog_or_tty->print_cr("first_block " PTR_FORMAT
duke@435 1890 " _offset " PTR_FORMAT
duke@435 1891 "_first_is_start_bit %d",
duke@435 1892 first_block,
duke@435 1893 _summary_data.block(first_block)->raw_offset(),
duke@435 1894 _summary_data.block(first_block)->first_is_start_bit());
duke@435 1895 }
duke@435 1896 }
duke@435 1897 #endif
duke@435 1898 }
duke@435 1899 }
duke@435 1900 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
duke@435 1901 #endif // #if 0
duke@435 1902 }
duke@435 1903
duke@435 1904 // This method should contain all heap-specific policy for invoking a full
duke@435 1905 // collection. invoke_no_policy() will only attempt to compact the heap; it
duke@435 1906 // will do nothing further. If we need to bail out for policy reasons, scavenge
duke@435 1907 // before full gc, or any other specialized behavior, it needs to be added here.
duke@435 1908 //
duke@435 1909 // Note that this method should only be called from the vm_thread while at a
duke@435 1910 // safepoint.
duke@435 1911 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
duke@435 1912 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 1913 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
duke@435 1914 "should be in vm thread");
duke@435 1915 ParallelScavengeHeap* heap = gc_heap();
duke@435 1916 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1917 assert(!heap->is_gc_active(), "not reentrant");
duke@435 1918
duke@435 1919 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@435 1920
duke@435 1921 // Before each allocation/collection attempt, find out from the
duke@435 1922 // policy object if GCs are, on the whole, taking too long. If so,
duke@435 1923 // bail out without attempting a collection. The exceptions are
duke@435 1924 // for explicitly requested GC's.
duke@435 1925 if (!policy->gc_time_limit_exceeded() ||
duke@435 1926 GCCause::is_user_requested_gc(gc_cause) ||
duke@435 1927 GCCause::is_serviceability_requested_gc(gc_cause)) {
duke@435 1928 IsGCActiveMark mark;
duke@435 1929
duke@435 1930 if (ScavengeBeforeFullGC) {
duke@435 1931 PSScavenge::invoke_no_policy();
duke@435 1932 }
duke@435 1933
duke@435 1934 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
duke@435 1935 }
duke@435 1936 }
duke@435 1937
duke@435 1938 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
duke@435 1939 size_t addr_chunk_index = addr_to_chunk_idx(addr);
duke@435 1940 return chunk_index == addr_chunk_index;
duke@435 1941 }
duke@435 1942
duke@435 1943 bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
duke@435 1944 size_t block_index) {
duke@435 1945 size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
duke@435 1946 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
duke@435 1947
duke@435 1948 return (first_block_in_chunk <= block_index) &&
duke@435 1949 (block_index <= last_block_in_chunk);
duke@435 1950 }
duke@435 1951
duke@435 1952 // This method contains no policy. You should probably
duke@435 1953 // be calling invoke() instead.
duke@435 1954 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
duke@435 1955 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
duke@435 1956 assert(ref_processor() != NULL, "Sanity");
duke@435 1957
apetrusenko@574 1958 if (GC_locker::check_active_before_gc()) {
duke@435 1959 return;
duke@435 1960 }
duke@435 1961
duke@435 1962 TimeStamp marking_start;
duke@435 1963 TimeStamp compaction_start;
duke@435 1964 TimeStamp collection_exit;
duke@435 1965
duke@435 1966 ParallelScavengeHeap* heap = gc_heap();
duke@435 1967 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1968 PSYoungGen* young_gen = heap->young_gen();
duke@435 1969 PSOldGen* old_gen = heap->old_gen();
duke@435 1970 PSPermGen* perm_gen = heap->perm_gen();
duke@435 1971 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@435 1972
jmasa@698 1973 if (ZapUnusedHeapArea) {
jmasa@698 1974 // Save information needed to minimize mangling
jmasa@698 1975 heap->record_gen_tops_before_GC();
jmasa@698 1976 }
jmasa@698 1977
duke@435 1978 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
duke@435 1979
duke@435 1980 // Make sure data structures are sane, make the heap parsable, and do other
duke@435 1981 // miscellaneous bookkeeping.
duke@435 1982 PreGCValues pre_gc_values;
duke@435 1983 pre_compact(&pre_gc_values);
duke@435 1984
jcoomes@645 1985 // Get the compaction manager reserved for the VM thread.
jcoomes@645 1986 ParCompactionManager* const vmthread_cm =
jcoomes@645 1987 ParCompactionManager::manager_array(gc_task_manager()->workers());
jcoomes@645 1988
duke@435 1989 // Place after pre_compact() where the number of invocations is incremented.
duke@435 1990 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@435 1991
duke@435 1992 {
duke@435 1993 ResourceMark rm;
duke@435 1994 HandleMark hm;
duke@435 1995
duke@435 1996 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
duke@435 1997
duke@435 1998 // This is useful for debugging but don't change the output the
duke@435 1999 // the customer sees.
duke@435 2000 const char* gc_cause_str = "Full GC";
duke@435 2001 if (is_system_gc && PrintGCDetails) {
duke@435 2002 gc_cause_str = "Full GC (System)";
duke@435 2003 }
duke@435 2004 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 2005 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 2006 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
duke@435 2007 TraceCollectorStats tcs(counters());
duke@435 2008 TraceMemoryManagerStats tms(true /* Full GC */);
duke@435 2009
duke@435 2010 if (TraceGen1Time) accumulated_time()->start();
duke@435 2011
duke@435 2012 // Let the size policy know we're starting
duke@435 2013 size_policy->major_collection_begin();
duke@435 2014
duke@435 2015 // When collecting the permanent generation methodOops may be moving,
duke@435 2016 // so we either have to flush all bcp data or convert it into bci.
duke@435 2017 CodeCache::gc_prologue();
duke@435 2018 Threads::gc_prologue();
duke@435 2019
duke@435 2020 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 2021 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 2022
duke@435 2023 ref_processor()->enable_discovery();
duke@435 2024
duke@435 2025 bool marked_for_unloading = false;
duke@435 2026
duke@435 2027 marking_start.update();
jcoomes@645 2028 marking_phase(vmthread_cm, maximum_heap_compaction);
duke@435 2029
duke@435 2030 #ifndef PRODUCT
duke@435 2031 if (TraceParallelOldGCMarkingPhase) {
duke@435 2032 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
duke@435 2033 "cas_by_another %d",
duke@435 2034 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
duke@435 2035 mark_bitmap()->cas_by_another());
duke@435 2036 }
duke@435 2037 #endif // #ifndef PRODUCT
duke@435 2038
duke@435 2039 #ifdef ASSERT
duke@435 2040 if (VerifyParallelOldWithMarkSweep &&
duke@435 2041 (PSParallelCompact::total_invocations() %
duke@435 2042 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2043 gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
duke@435 2044 if (PrintGCDetails && Verbose) {
duke@435 2045 gclog_or_tty->print_cr("mark_sweep_phase1:");
duke@435 2046 }
duke@435 2047 // Clear the discovered lists so that discovered objects
duke@435 2048 // don't look like they have been discovered twice.
duke@435 2049 ref_processor()->clear_discovered_references();
duke@435 2050
duke@435 2051 PSMarkSweep::allocate_stacks();
duke@435 2052 MemRegion mr = Universe::heap()->reserved_region();
duke@435 2053 PSMarkSweep::ref_processor()->enable_discovery();
duke@435 2054 PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction);
duke@435 2055 }
duke@435 2056 #endif
duke@435 2057
duke@435 2058 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
jcoomes@645 2059 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
duke@435 2060
duke@435 2061 #ifdef ASSERT
duke@435 2062 if (VerifyParallelOldWithMarkSweep &&
duke@435 2063 (PSParallelCompact::total_invocations() %
duke@435 2064 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2065 if (PrintGCDetails && Verbose) {
duke@435 2066 gclog_or_tty->print_cr("mark_sweep_phase2:");
duke@435 2067 }
duke@435 2068 PSMarkSweep::mark_sweep_phase2();
duke@435 2069 }
duke@435 2070 #endif
duke@435 2071
duke@435 2072 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
duke@435 2073 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
duke@435 2074
duke@435 2075 // adjust_roots() updates Universe::_intArrayKlassObj which is
duke@435 2076 // needed by the compaction for filling holes in the dense prefix.
duke@435 2077 adjust_roots();
duke@435 2078
duke@435 2079 #ifdef ASSERT
duke@435 2080 if (VerifyParallelOldWithMarkSweep &&
duke@435 2081 (PSParallelCompact::total_invocations() %
duke@435 2082 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2083 // Do a separate verify phase so that the verify
duke@435 2084 // code can use the the forwarding pointers to
duke@435 2085 // check the new pointer calculation. The restore_marks()
duke@435 2086 // has to be done before the real compact.
jcoomes@645 2087 vmthread_cm->set_action(ParCompactionManager::VerifyUpdate);
jcoomes@645 2088 compact_perm(vmthread_cm);
jcoomes@645 2089 compact_serial(vmthread_cm);
jcoomes@645 2090 vmthread_cm->set_action(ParCompactionManager::ResetObjects);
jcoomes@645 2091 compact_perm(vmthread_cm);
jcoomes@645 2092 compact_serial(vmthread_cm);
jcoomes@645 2093 vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy);
duke@435 2094
duke@435 2095 // For debugging only
duke@435 2096 PSMarkSweep::restore_marks();
duke@435 2097 PSMarkSweep::deallocate_stacks();
duke@435 2098 }
duke@435 2099 #endif
duke@435 2100
duke@435 2101 compaction_start.update();
duke@435 2102 // Does the perm gen always have to be done serially because
duke@435 2103 // klasses are used in the update of an object?
jcoomes@645 2104 compact_perm(vmthread_cm);
duke@435 2105
duke@435 2106 if (UseParallelOldGCCompacting) {
duke@435 2107 compact();
duke@435 2108 } else {
jcoomes@645 2109 compact_serial(vmthread_cm);
duke@435 2110 }
duke@435 2111
duke@435 2112 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
duke@435 2113 // done before resizing.
duke@435 2114 post_compact();
duke@435 2115
duke@435 2116 // Let the size policy know we're done
duke@435 2117 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
duke@435 2118
duke@435 2119 if (UseAdaptiveSizePolicy) {
duke@435 2120 if (PrintAdaptiveSizePolicy) {
duke@435 2121 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@435 2122 gclog_or_tty->stamp();
duke@435 2123 gclog_or_tty->print_cr(" collection: %d ",
duke@435 2124 heap->total_collections());
duke@435 2125 if (Verbose) {
duke@435 2126 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
duke@435 2127 " perm_gen_capacity: %d ",
duke@435 2128 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@435 2129 perm_gen->capacity_in_bytes());
duke@435 2130 }
duke@435 2131 }
duke@435 2132
duke@435 2133 // Don't check if the size_policy is ready here. Let
duke@435 2134 // the size_policy check that internally.
duke@435 2135 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
duke@435 2136 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@435 2137 UseAdaptiveSizePolicyWithSystemGC)) {
duke@435 2138 // Calculate optimal free space amounts
duke@435 2139 assert(young_gen->max_size() >
duke@435 2140 young_gen->from_space()->capacity_in_bytes() +
duke@435 2141 young_gen->to_space()->capacity_in_bytes(),
duke@435 2142 "Sizes of space in young gen are out-of-bounds");
duke@435 2143 size_t max_eden_size = young_gen->max_size() -
duke@435 2144 young_gen->from_space()->capacity_in_bytes() -
duke@435 2145 young_gen->to_space()->capacity_in_bytes();
jmasa@698 2146 size_policy->compute_generation_free_space(
jmasa@698 2147 young_gen->used_in_bytes(),
jmasa@698 2148 young_gen->eden_space()->used_in_bytes(),
jmasa@698 2149 old_gen->used_in_bytes(),
jmasa@698 2150 perm_gen->used_in_bytes(),
jmasa@698 2151 young_gen->eden_space()->capacity_in_bytes(),
jmasa@698 2152 old_gen->max_gen_size(),
jmasa@698 2153 max_eden_size,
jmasa@698 2154 true /* full gc*/,
jmasa@698 2155 gc_cause);
jmasa@698 2156
jmasa@698 2157 heap->resize_old_gen(
jmasa@698 2158 size_policy->calculated_old_free_size_in_bytes());
duke@435 2159
duke@435 2160 // Don't resize the young generation at an major collection. A
duke@435 2161 // desired young generation size may have been calculated but
duke@435 2162 // resizing the young generation complicates the code because the
duke@435 2163 // resizing of the old generation may have moved the boundary
duke@435 2164 // between the young generation and the old generation. Let the
duke@435 2165 // young generation resizing happen at the minor collections.
duke@435 2166 }
duke@435 2167 if (PrintAdaptiveSizePolicy) {
duke@435 2168 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@435 2169 heap->total_collections());
duke@435 2170 }
duke@435 2171 }
duke@435 2172
duke@435 2173 if (UsePerfData) {
duke@435 2174 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
duke@435 2175 counters->update_counters();
duke@435 2176 counters->update_old_capacity(old_gen->capacity_in_bytes());
duke@435 2177 counters->update_young_capacity(young_gen->capacity_in_bytes());
duke@435 2178 }
duke@435 2179
duke@435 2180 heap->resize_all_tlabs();
duke@435 2181
duke@435 2182 // We collected the perm gen, so we'll resize it here.
duke@435 2183 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
duke@435 2184
duke@435 2185 if (TraceGen1Time) accumulated_time()->stop();
duke@435 2186
duke@435 2187 if (PrintGC) {
duke@435 2188 if (PrintGCDetails) {
duke@435 2189 // No GC timestamp here. This is after GC so it would be confusing.
duke@435 2190 young_gen->print_used_change(pre_gc_values.young_gen_used());
duke@435 2191 old_gen->print_used_change(pre_gc_values.old_gen_used());
duke@435 2192 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 2193 // Print perm gen last (print_heap_change() excludes the perm gen).
duke@435 2194 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
duke@435 2195 } else {
duke@435 2196 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 2197 }
duke@435 2198 }
duke@435 2199
duke@435 2200 // Track memory usage and detect low memory
duke@435 2201 MemoryService::track_memory_usage();
duke@435 2202 heap->update_counters();
duke@435 2203
duke@435 2204 if (PrintGCDetails) {
duke@435 2205 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
duke@435 2206 if (size_policy->gc_time_limit_exceeded()) {
duke@435 2207 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
duke@435 2208 "of %d%%", GCTimeLimit);
duke@435 2209 } else {
duke@435 2210 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
duke@435 2211 "of %d%%", GCTimeLimit);
duke@435 2212 }
duke@435 2213 }
duke@435 2214 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
duke@435 2215 }
duke@435 2216 }
duke@435 2217
duke@435 2218 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 2219 HandleMark hm; // Discard invalid handles created during verification
duke@435 2220 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 2221 Universe::verify(false);
duke@435 2222 }
duke@435 2223
duke@435 2224 // Re-verify object start arrays
duke@435 2225 if (VerifyObjectStartArray &&
duke@435 2226 VerifyAfterGC) {
duke@435 2227 old_gen->verify_object_start_array();
duke@435 2228 perm_gen->verify_object_start_array();
duke@435 2229 }
duke@435 2230
jmasa@698 2231 if (ZapUnusedHeapArea) {
jmasa@698 2232 old_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 2233 perm_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 2234 }
jmasa@698 2235
duke@435 2236 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 2237
duke@435 2238 collection_exit.update();
duke@435 2239
duke@435 2240 if (PrintHeapAtGC) {
duke@435 2241 Universe::print_heap_after_gc();
duke@435 2242 }
duke@435 2243 if (PrintGCTaskTimeStamps) {
duke@435 2244 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
duke@435 2245 INT64_FORMAT,
duke@435 2246 marking_start.ticks(), compaction_start.ticks(),
duke@435 2247 collection_exit.ticks());
duke@435 2248 gc_task_manager()->print_task_time_stamps();
duke@435 2249 }
duke@435 2250 }
duke@435 2251
duke@435 2252 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 2253 PSYoungGen* young_gen,
duke@435 2254 PSOldGen* old_gen) {
duke@435 2255 MutableSpace* const eden_space = young_gen->eden_space();
duke@435 2256 assert(!eden_space->is_empty(), "eden must be non-empty");
duke@435 2257 assert(young_gen->virtual_space()->alignment() ==
duke@435 2258 old_gen->virtual_space()->alignment(), "alignments do not match");
duke@435 2259
duke@435 2260 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
duke@435 2261 return false;
duke@435 2262 }
duke@435 2263
duke@435 2264 // Both generations must be completely committed.
duke@435 2265 if (young_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 2266 return false;
duke@435 2267 }
duke@435 2268 if (old_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 2269 return false;
duke@435 2270 }
duke@435 2271
duke@435 2272 // Figure out how much to take from eden. Include the average amount promoted
duke@435 2273 // in the total; otherwise the next young gen GC will simply bail out to a
duke@435 2274 // full GC.
duke@435 2275 const size_t alignment = old_gen->virtual_space()->alignment();
duke@435 2276 const size_t eden_used = eden_space->used_in_bytes();
duke@435 2277 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
duke@435 2278 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
duke@435 2279 const size_t eden_capacity = eden_space->capacity_in_bytes();
duke@435 2280
duke@435 2281 if (absorb_size >= eden_capacity) {
duke@435 2282 return false; // Must leave some space in eden.
duke@435 2283 }
duke@435 2284
duke@435 2285 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
duke@435 2286 if (new_young_size < young_gen->min_gen_size()) {
duke@435 2287 return false; // Respect young gen minimum size.
duke@435 2288 }
duke@435 2289
duke@435 2290 if (TraceAdaptiveGCBoundary && Verbose) {
duke@435 2291 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
duke@435 2292 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
duke@435 2293 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
duke@435 2294 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
duke@435 2295 absorb_size / K,
duke@435 2296 eden_capacity / K, (eden_capacity - absorb_size) / K,
duke@435 2297 young_gen->from_space()->used_in_bytes() / K,
duke@435 2298 young_gen->to_space()->used_in_bytes() / K,
duke@435 2299 young_gen->capacity_in_bytes() / K, new_young_size / K);
duke@435 2300 }
duke@435 2301
duke@435 2302 // Fill the unused part of the old gen.
duke@435 2303 MutableSpace* const old_space = old_gen->object_space();
duke@435 2304 MemRegion old_gen_unused(old_space->top(), old_space->end());
duke@435 2305 if (!old_gen_unused.is_empty()) {
duke@435 2306 SharedHeap::fill_region_with_object(old_gen_unused);
duke@435 2307 }
duke@435 2308
duke@435 2309 // Take the live data from eden and set both top and end in the old gen to
duke@435 2310 // eden top. (Need to set end because reset_after_change() mangles the region
duke@435 2311 // from end to virtual_space->high() in debug builds).
duke@435 2312 HeapWord* const new_top = eden_space->top();
duke@435 2313 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
duke@435 2314 absorb_size);
duke@435 2315 young_gen->reset_after_change();
duke@435 2316 old_space->set_top(new_top);
duke@435 2317 old_space->set_end(new_top);
duke@435 2318 old_gen->reset_after_change();
duke@435 2319
duke@435 2320 // Update the object start array for the filler object and the data from eden.
duke@435 2321 ObjectStartArray* const start_array = old_gen->start_array();
duke@435 2322 HeapWord* const start = old_gen_unused.start();
duke@435 2323 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
duke@435 2324 start_array->allocate_block(addr);
duke@435 2325 }
duke@435 2326
duke@435 2327 // Could update the promoted average here, but it is not typically updated at
duke@435 2328 // full GCs and the value to use is unclear. Something like
duke@435 2329 //
duke@435 2330 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
duke@435 2331
duke@435 2332 size_policy->set_bytes_absorbed_from_eden(absorb_size);
duke@435 2333 return true;
duke@435 2334 }
duke@435 2335
duke@435 2336 GCTaskManager* const PSParallelCompact::gc_task_manager() {
duke@435 2337 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
duke@435 2338 "shouldn't return NULL");
duke@435 2339 return ParallelScavengeHeap::gc_task_manager();
duke@435 2340 }
duke@435 2341
duke@435 2342 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
duke@435 2343 bool maximum_heap_compaction) {
duke@435 2344 // Recursively traverse all live objects and mark them
duke@435 2345 EventMark m("1 mark object");
duke@435 2346 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
duke@435 2347
duke@435 2348 ParallelScavengeHeap* heap = gc_heap();
duke@435 2349 uint parallel_gc_threads = heap->gc_task_manager()->workers();
duke@435 2350 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
duke@435 2351 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2352
duke@435 2353 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 2354 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
duke@435 2355
duke@435 2356 {
duke@435 2357 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
duke@435 2358
duke@435 2359 GCTaskQueue* q = GCTaskQueue::create();
duke@435 2360
duke@435 2361 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
duke@435 2362 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
duke@435 2363 // We scan the thread roots in parallel
duke@435 2364 Threads::create_thread_roots_marking_tasks(q);
duke@435 2365 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
duke@435 2366 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
duke@435 2367 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
duke@435 2368 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
duke@435 2369 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
duke@435 2370 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
duke@435 2371
duke@435 2372 if (parallel_gc_threads > 1) {
duke@435 2373 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 2374 q->enqueue(new StealMarkingTask(&terminator));
duke@435 2375 }
duke@435 2376 }
duke@435 2377
duke@435 2378 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2379 q->enqueue(fin);
duke@435 2380
duke@435 2381 gc_task_manager()->add_list(q);
duke@435 2382
duke@435 2383 fin->wait_for();
duke@435 2384
duke@435 2385 // We have to release the barrier tasks!
duke@435 2386 WaitForBarrierGCTask::destroy(fin);
duke@435 2387 }
duke@435 2388
duke@435 2389 // Process reference objects found during marking
duke@435 2390 {
duke@435 2391 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
duke@435 2392 ReferencePolicy *soft_ref_policy;
duke@435 2393 if (maximum_heap_compaction) {
duke@435 2394 soft_ref_policy = new AlwaysClearPolicy();
duke@435 2395 } else {
duke@435 2396 #ifdef COMPILER2
duke@435 2397 soft_ref_policy = new LRUMaxHeapPolicy();
duke@435 2398 #else
duke@435 2399 soft_ref_policy = new LRUCurrentHeapPolicy();
duke@435 2400 #endif // COMPILER2
duke@435 2401 }
duke@435 2402 assert(soft_ref_policy != NULL, "No soft reference policy");
duke@435 2403 if (ref_processor()->processing_is_mt()) {
duke@435 2404 RefProcTaskExecutor task_executor;
duke@435 2405 ref_processor()->process_discovered_references(
duke@435 2406 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
duke@435 2407 &follow_stack_closure, &task_executor);
duke@435 2408 } else {
duke@435 2409 ref_processor()->process_discovered_references(
duke@435 2410 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
duke@435 2411 &follow_stack_closure, NULL);
duke@435 2412 }
duke@435 2413 }
duke@435 2414
duke@435 2415 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
duke@435 2416 // Follow system dictionary roots and unload classes.
duke@435 2417 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
duke@435 2418
duke@435 2419 // Follow code cache roots.
duke@435 2420 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
duke@435 2421 purged_class);
duke@435 2422 follow_stack(cm); // Flush marking stack.
duke@435 2423
duke@435 2424 // Update subklass/sibling/implementor links of live klasses
duke@435 2425 // revisit_klass_stack is used in follow_weak_klass_links().
duke@435 2426 follow_weak_klass_links(cm);
duke@435 2427
duke@435 2428 // Visit symbol and interned string tables and delete unmarked oops
duke@435 2429 SymbolTable::unlink(is_alive_closure());
duke@435 2430 StringTable::unlink(is_alive_closure());
duke@435 2431
duke@435 2432 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
duke@435 2433 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
duke@435 2434 }
duke@435 2435
duke@435 2436 // This should be moved to the shared markSweep code!
duke@435 2437 class PSAlwaysTrueClosure: public BoolObjectClosure {
duke@435 2438 public:
duke@435 2439 void do_object(oop p) { ShouldNotReachHere(); }
duke@435 2440 bool do_object_b(oop p) { return true; }
duke@435 2441 };
duke@435 2442 static PSAlwaysTrueClosure always_true;
duke@435 2443
duke@435 2444 void PSParallelCompact::adjust_roots() {
duke@435 2445 // Adjust the pointers to reflect the new locations
duke@435 2446 EventMark m("3 adjust roots");
duke@435 2447 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
duke@435 2448
duke@435 2449 // General strong roots.
duke@435 2450 Universe::oops_do(adjust_root_pointer_closure());
duke@435 2451 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
duke@435 2452 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
duke@435 2453 Threads::oops_do(adjust_root_pointer_closure());
duke@435 2454 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
duke@435 2455 FlatProfiler::oops_do(adjust_root_pointer_closure());
duke@435 2456 Management::oops_do(adjust_root_pointer_closure());
duke@435 2457 JvmtiExport::oops_do(adjust_root_pointer_closure());
duke@435 2458 // SO_AllClasses
duke@435 2459 SystemDictionary::oops_do(adjust_root_pointer_closure());
duke@435 2460 vmSymbols::oops_do(adjust_root_pointer_closure());
duke@435 2461
duke@435 2462 // Now adjust pointers in remaining weak roots. (All of which should
duke@435 2463 // have been cleared if they pointed to non-surviving objects.)
duke@435 2464 // Global (weak) JNI handles
duke@435 2465 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
duke@435 2466
duke@435 2467 CodeCache::oops_do(adjust_pointer_closure());
duke@435 2468 SymbolTable::oops_do(adjust_root_pointer_closure());
duke@435 2469 StringTable::oops_do(adjust_root_pointer_closure());
duke@435 2470 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 2471 // Roots were visited so references into the young gen in roots
duke@435 2472 // may have been scanned. Process them also.
duke@435 2473 // Should the reference processor have a span that excludes
duke@435 2474 // young gen objects?
duke@435 2475 PSScavenge::reference_processor()->weak_oops_do(
duke@435 2476 adjust_root_pointer_closure());
duke@435 2477 }
duke@435 2478
duke@435 2479 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
duke@435 2480 EventMark m("4 compact perm");
duke@435 2481 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
duke@435 2482 // trace("4");
duke@435 2483
duke@435 2484 gc_heap()->perm_gen()->start_array()->reset();
duke@435 2485 move_and_update(cm, perm_space_id);
duke@435 2486 }
duke@435 2487
duke@435 2488 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
duke@435 2489 uint parallel_gc_threads) {
duke@435 2490 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
duke@435 2491
duke@435 2492 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
duke@435 2493 for (unsigned int j = 0; j < task_count; j++) {
duke@435 2494 q->enqueue(new DrainStacksCompactionTask());
duke@435 2495 }
duke@435 2496
duke@435 2497 // Find all chunks that are available (can be filled immediately) and
duke@435 2498 // distribute them to the thread stacks. The iteration is done in reverse
duke@435 2499 // order (high to low) so the chunks will be removed in ascending order.
duke@435 2500
duke@435 2501 const ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2502
duke@435 2503 size_t fillable_chunks = 0; // A count for diagnostic purposes.
duke@435 2504 unsigned int which = 0; // The worker thread number.
duke@435 2505
duke@435 2506 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
duke@435 2507 SpaceInfo* const space_info = _space_info + id;
duke@435 2508 MutableSpace* const space = space_info->space();
duke@435 2509 HeapWord* const new_top = space_info->new_top();
duke@435 2510
duke@435 2511 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
duke@435 2512 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
duke@435 2513 assert(end_chunk > 0, "perm gen cannot be empty");
duke@435 2514
duke@435 2515 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
duke@435 2516 if (sd.chunk(cur)->claim_unsafe()) {
duke@435 2517 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
duke@435 2518 cm->save_for_processing(cur);
duke@435 2519
duke@435 2520 if (TraceParallelOldGCCompactionPhase && Verbose) {
duke@435 2521 const size_t count_mod_8 = fillable_chunks & 7;
duke@435 2522 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
jcoomes@699 2523 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
duke@435 2524 if (count_mod_8 == 7) gclog_or_tty->cr();
duke@435 2525 }
duke@435 2526
duke@435 2527 NOT_PRODUCT(++fillable_chunks;)
duke@435 2528
duke@435 2529 // Assign chunks to threads in round-robin fashion.
duke@435 2530 if (++which == task_count) {
duke@435 2531 which = 0;
duke@435 2532 }
duke@435 2533 }
duke@435 2534 }
duke@435 2535 }
duke@435 2536
duke@435 2537 if (TraceParallelOldGCCompactionPhase) {
duke@435 2538 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
duke@435 2539 gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks);
duke@435 2540 }
duke@435 2541 }
duke@435 2542
duke@435 2543 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
duke@435 2544
duke@435 2545 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 2546 uint parallel_gc_threads) {
duke@435 2547 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
duke@435 2548
duke@435 2549 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2550
duke@435 2551 // Iterate over all the spaces adding tasks for updating
duke@435 2552 // chunks in the dense prefix. Assume that 1 gc thread
duke@435 2553 // will work on opening the gaps and the remaining gc threads
duke@435 2554 // will work on the dense prefix.
duke@435 2555 SpaceId space_id = old_space_id;
duke@435 2556 while (space_id != last_space_id) {
duke@435 2557 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
duke@435 2558 const MutableSpace* const space = _space_info[space_id].space();
duke@435 2559
duke@435 2560 if (dense_prefix_end == space->bottom()) {
duke@435 2561 // There is no dense prefix for this space.
duke@435 2562 space_id = next_compaction_space_id(space_id);
duke@435 2563 continue;
duke@435 2564 }
duke@435 2565
duke@435 2566 // The dense prefix is before this chunk.
duke@435 2567 size_t chunk_index_end_dense_prefix =
duke@435 2568 sd.addr_to_chunk_idx(dense_prefix_end);
duke@435 2569 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
duke@435 2570 assert(dense_prefix_end == space->end() ||
duke@435 2571 dense_prefix_cp->available() ||
duke@435 2572 dense_prefix_cp->claimed(),
duke@435 2573 "The chunk after the dense prefix should always be ready to fill");
duke@435 2574
duke@435 2575 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
duke@435 2576
duke@435 2577 // Is there dense prefix work?
duke@435 2578 size_t total_dense_prefix_chunks =
duke@435 2579 chunk_index_end_dense_prefix - chunk_index_start;
duke@435 2580 // How many chunks of the dense prefix should be given to
duke@435 2581 // each thread?
duke@435 2582 if (total_dense_prefix_chunks > 0) {
duke@435 2583 uint tasks_for_dense_prefix = 1;
duke@435 2584 if (UseParallelDensePrefixUpdate) {
duke@435 2585 if (total_dense_prefix_chunks <=
duke@435 2586 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
duke@435 2587 // Don't over partition. This assumes that
duke@435 2588 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
duke@435 2589 // so there are not many chunks to process.
duke@435 2590 tasks_for_dense_prefix = parallel_gc_threads;
duke@435 2591 } else {
duke@435 2592 // Over partition
duke@435 2593 tasks_for_dense_prefix = parallel_gc_threads *
duke@435 2594 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
duke@435 2595 }
duke@435 2596 }
duke@435 2597 size_t chunks_per_thread = total_dense_prefix_chunks /
duke@435 2598 tasks_for_dense_prefix;
duke@435 2599 // Give each thread at least 1 chunk.
duke@435 2600 if (chunks_per_thread == 0) {
duke@435 2601 chunks_per_thread = 1;
duke@435 2602 }
duke@435 2603
duke@435 2604 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
duke@435 2605 if (chunk_index_start >= chunk_index_end_dense_prefix) {
duke@435 2606 break;
duke@435 2607 }
duke@435 2608 // chunk_index_end is not processed
duke@435 2609 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
duke@435 2610 chunk_index_end_dense_prefix);
duke@435 2611 q->enqueue(new UpdateDensePrefixTask(
duke@435 2612 space_id,
duke@435 2613 chunk_index_start,
duke@435 2614 chunk_index_end));
duke@435 2615 chunk_index_start = chunk_index_end;
duke@435 2616 }
duke@435 2617 }
duke@435 2618 // This gets any part of the dense prefix that did not
duke@435 2619 // fit evenly.
duke@435 2620 if (chunk_index_start < chunk_index_end_dense_prefix) {
duke@435 2621 q->enqueue(new UpdateDensePrefixTask(
duke@435 2622 space_id,
duke@435 2623 chunk_index_start,
duke@435 2624 chunk_index_end_dense_prefix));
duke@435 2625 }
duke@435 2626 space_id = next_compaction_space_id(space_id);
duke@435 2627 } // End tasks for dense prefix
duke@435 2628 }
duke@435 2629
duke@435 2630 void PSParallelCompact::enqueue_chunk_stealing_tasks(
duke@435 2631 GCTaskQueue* q,
duke@435 2632 ParallelTaskTerminator* terminator_ptr,
duke@435 2633 uint parallel_gc_threads) {
duke@435 2634 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
duke@435 2635
duke@435 2636 // Once a thread has drained it's stack, it should try to steal chunks from
duke@435 2637 // other threads.
duke@435 2638 if (parallel_gc_threads > 1) {
duke@435 2639 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 2640 q->enqueue(new StealChunkCompactionTask(terminator_ptr));
duke@435 2641 }
duke@435 2642 }
duke@435 2643 }
duke@435 2644
duke@435 2645 void PSParallelCompact::compact() {
duke@435 2646 EventMark m("5 compact");
duke@435 2647 // trace("5");
duke@435 2648 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
duke@435 2649
duke@435 2650 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2651 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2652 PSOldGen* old_gen = heap->old_gen();
duke@435 2653 old_gen->start_array()->reset();
duke@435 2654 uint parallel_gc_threads = heap->gc_task_manager()->workers();
duke@435 2655 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
duke@435 2656 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2657
duke@435 2658 GCTaskQueue* q = GCTaskQueue::create();
duke@435 2659 enqueue_chunk_draining_tasks(q, parallel_gc_threads);
duke@435 2660 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
duke@435 2661 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);
duke@435 2662
duke@435 2663 {
duke@435 2664 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
duke@435 2665
duke@435 2666 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2667 q->enqueue(fin);
duke@435 2668
duke@435 2669 gc_task_manager()->add_list(q);
duke@435 2670
duke@435 2671 fin->wait_for();
duke@435 2672
duke@435 2673 // We have to release the barrier tasks!
duke@435 2674 WaitForBarrierGCTask::destroy(fin);
duke@435 2675
duke@435 2676 #ifdef ASSERT
duke@435 2677 // Verify that all chunks have been processed before the deferred updates.
duke@435 2678 // Note that perm_space_id is skipped; this type of verification is not
duke@435 2679 // valid until the perm gen is compacted by chunks.
duke@435 2680 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2681 verify_complete(SpaceId(id));
duke@435 2682 }
duke@435 2683 #endif
duke@435 2684 }
duke@435 2685
duke@435 2686 {
duke@435 2687 // Update the deferred objects, if any. Any compaction manager can be used.
duke@435 2688 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
duke@435 2689 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
duke@435 2690 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2691 update_deferred_objects(cm, SpaceId(id));
duke@435 2692 }
duke@435 2693 }
duke@435 2694 }
duke@435 2695
duke@435 2696 #ifdef ASSERT
duke@435 2697 void PSParallelCompact::verify_complete(SpaceId space_id) {
duke@435 2698 // All Chunks between space bottom() to new_top() should be marked as filled
duke@435 2699 // and all Chunks between new_top() and top() should be available (i.e.,
duke@435 2700 // should have been emptied).
duke@435 2701 ParallelCompactData& sd = summary_data();
duke@435 2702 SpaceInfo si = _space_info[space_id];
duke@435 2703 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
duke@435 2704 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
duke@435 2705 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
duke@435 2706 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
duke@435 2707 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);
duke@435 2708
duke@435 2709 bool issued_a_warning = false;
duke@435 2710
duke@435 2711 size_t cur_chunk;
duke@435 2712 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
duke@435 2713 const ChunkData* const c = sd.chunk(cur_chunk);
duke@435 2714 if (!c->completed()) {
duke@435 2715 warning("chunk " SIZE_FORMAT " not filled: "
duke@435 2716 "destination_count=" SIZE_FORMAT,
duke@435 2717 cur_chunk, c->destination_count());
duke@435 2718 issued_a_warning = true;
duke@435 2719 }
duke@435 2720 }
duke@435 2721
duke@435 2722 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
duke@435 2723 const ChunkData* const c = sd.chunk(cur_chunk);
duke@435 2724 if (!c->available()) {
duke@435 2725 warning("chunk " SIZE_FORMAT " not empty: "
duke@435 2726 "destination_count=" SIZE_FORMAT,
duke@435 2727 cur_chunk, c->destination_count());
duke@435 2728 issued_a_warning = true;
duke@435 2729 }
duke@435 2730 }
duke@435 2731
duke@435 2732 if (issued_a_warning) {
duke@435 2733 print_chunk_ranges();
duke@435 2734 }
duke@435 2735 }
duke@435 2736 #endif // #ifdef ASSERT
duke@435 2737
duke@435 2738 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
duke@435 2739 EventMark m("5 compact serial");
duke@435 2740 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
duke@435 2741
duke@435 2742 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2743 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2744
duke@435 2745 PSYoungGen* young_gen = heap->young_gen();
duke@435 2746 PSOldGen* old_gen = heap->old_gen();
duke@435 2747
duke@435 2748 old_gen->start_array()->reset();
duke@435 2749 old_gen->move_and_update(cm);
duke@435 2750 young_gen->move_and_update(cm);
duke@435 2751 }
duke@435 2752
duke@435 2753
duke@435 2754 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
duke@435 2755 while(!cm->overflow_stack()->is_empty()) {
duke@435 2756 oop obj = cm->overflow_stack()->pop();
duke@435 2757 obj->follow_contents(cm);
duke@435 2758 }
duke@435 2759
duke@435 2760 oop obj;
duke@435 2761 // obj is a reference!!!
duke@435 2762 while (cm->marking_stack()->pop_local(obj)) {
duke@435 2763 // It would be nice to assert about the type of objects we might
duke@435 2764 // pop, but they can come from anywhere, unfortunately.
duke@435 2765 obj->follow_contents(cm);
duke@435 2766 }
duke@435 2767 }
duke@435 2768
duke@435 2769 void
duke@435 2770 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
duke@435 2771 // All klasses on the revisit stack are marked at this point.
duke@435 2772 // Update and follow all subklass, sibling and implementor links.
duke@435 2773 for (uint i = 0; i < ParallelGCThreads+1; i++) {
duke@435 2774 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
duke@435 2775 KeepAliveClosure keep_alive_closure(cm);
duke@435 2776 for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
duke@435 2777 cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
duke@435 2778 is_alive_closure(),
duke@435 2779 &keep_alive_closure);
duke@435 2780 }
duke@435 2781 follow_stack(cm);
duke@435 2782 }
duke@435 2783 }
duke@435 2784
duke@435 2785 void
duke@435 2786 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
duke@435 2787 cm->revisit_klass_stack()->push(k);
duke@435 2788 }
duke@435 2789
duke@435 2790 #ifdef VALIDATE_MARK_SWEEP
duke@435 2791
coleenp@548 2792 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
duke@435 2793 if (!ValidateMarkSweep)
duke@435 2794 return;
duke@435 2795
duke@435 2796 if (!isroot) {
duke@435 2797 if (_pointer_tracking) {
duke@435 2798 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
duke@435 2799 _adjusted_pointers->remove(p);
duke@435 2800 }
duke@435 2801 } else {
duke@435 2802 ptrdiff_t index = _root_refs_stack->find(p);
duke@435 2803 if (index != -1) {
duke@435 2804 int l = _root_refs_stack->length();
duke@435 2805 if (l > 0 && l - 1 != index) {
coleenp@548 2806 void* last = _root_refs_stack->pop();
duke@435 2807 assert(last != p, "should be different");
duke@435 2808 _root_refs_stack->at_put(index, last);
duke@435 2809 } else {
duke@435 2810 _root_refs_stack->remove(p);
duke@435 2811 }
duke@435 2812 }
duke@435 2813 }
duke@435 2814 }
duke@435 2815
duke@435 2816
coleenp@548 2817 void PSParallelCompact::check_adjust_pointer(void* p) {
duke@435 2818 _adjusted_pointers->push(p);
duke@435 2819 }
duke@435 2820
duke@435 2821
duke@435 2822 class AdjusterTracker: public OopClosure {
duke@435 2823 public:
duke@435 2824 AdjusterTracker() {};
coleenp@548 2825 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
coleenp@548 2826 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
duke@435 2827 };
duke@435 2828
duke@435 2829
duke@435 2830 void PSParallelCompact::track_interior_pointers(oop obj) {
duke@435 2831 if (ValidateMarkSweep) {
duke@435 2832 _adjusted_pointers->clear();
duke@435 2833 _pointer_tracking = true;
duke@435 2834
duke@435 2835 AdjusterTracker checker;
duke@435 2836 obj->oop_iterate(&checker);
duke@435 2837 }
duke@435 2838 }
duke@435 2839
duke@435 2840
duke@435 2841 void PSParallelCompact::check_interior_pointers() {
duke@435 2842 if (ValidateMarkSweep) {
duke@435 2843 _pointer_tracking = false;
duke@435 2844 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
duke@435 2845 }
duke@435 2846 }
duke@435 2847
duke@435 2848
duke@435 2849 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
duke@435 2850 if (ValidateMarkSweep) {
duke@435 2851 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
duke@435 2852 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
duke@435 2853 }
duke@435 2854 }
duke@435 2855
duke@435 2856
duke@435 2857 void PSParallelCompact::register_live_oop(oop p, size_t size) {
duke@435 2858 if (ValidateMarkSweep) {
duke@435 2859 _live_oops->push(p);
duke@435 2860 _live_oops_size->push(size);
duke@435 2861 _live_oops_index++;
duke@435 2862 }
duke@435 2863 }
duke@435 2864
duke@435 2865 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
duke@435 2866 if (ValidateMarkSweep) {
duke@435 2867 oop obj = _live_oops->at((int)_live_oops_index);
duke@435 2868 guarantee(obj == p, "should be the same object");
duke@435 2869 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
duke@435 2870 _live_oops_index++;
duke@435 2871 }
duke@435 2872 }
duke@435 2873
duke@435 2874 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
duke@435 2875 HeapWord* compaction_top) {
duke@435 2876 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
duke@435 2877 "should be moved to forwarded location");
duke@435 2878 if (ValidateMarkSweep) {
duke@435 2879 PSParallelCompact::validate_live_oop(oop(q), size);
duke@435 2880 _live_oops_moved_to->push(oop(compaction_top));
duke@435 2881 }
duke@435 2882 if (RecordMarkSweepCompaction) {
duke@435 2883 _cur_gc_live_oops->push(q);
duke@435 2884 _cur_gc_live_oops_moved_to->push(compaction_top);
duke@435 2885 _cur_gc_live_oops_size->push(size);
duke@435 2886 }
duke@435 2887 }
duke@435 2888
duke@435 2889
duke@435 2890 void PSParallelCompact::compaction_complete() {
duke@435 2891 if (RecordMarkSweepCompaction) {
duke@435 2892 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
duke@435 2893 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
duke@435 2894 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
duke@435 2895
duke@435 2896 _cur_gc_live_oops = _last_gc_live_oops;
duke@435 2897 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
duke@435 2898 _cur_gc_live_oops_size = _last_gc_live_oops_size;
duke@435 2899 _last_gc_live_oops = _tmp_live_oops;
duke@435 2900 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
duke@435 2901 _last_gc_live_oops_size = _tmp_live_oops_size;
duke@435 2902 }
duke@435 2903 }
duke@435 2904
duke@435 2905
duke@435 2906 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
duke@435 2907 if (!RecordMarkSweepCompaction) {
duke@435 2908 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
duke@435 2909 return;
duke@435 2910 }
duke@435 2911
duke@435 2912 if (_last_gc_live_oops == NULL) {
duke@435 2913 tty->print_cr("No compaction information gathered yet");
duke@435 2914 return;
duke@435 2915 }
duke@435 2916
duke@435 2917 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
duke@435 2918 HeapWord* old_oop = _last_gc_live_oops->at(i);
duke@435 2919 size_t sz = _last_gc_live_oops_size->at(i);
duke@435 2920 if (old_oop <= q && q < (old_oop + sz)) {
duke@435 2921 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
duke@435 2922 size_t offset = (q - old_oop);
duke@435 2923 tty->print_cr("Address " PTR_FORMAT, q);
duke@435 2924 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
duke@435 2925 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
duke@435 2926 return;
duke@435 2927 }
duke@435 2928 }
duke@435 2929
duke@435 2930 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
duke@435 2931 }
duke@435 2932 #endif //VALIDATE_MARK_SWEEP
duke@435 2933
duke@435 2934 // Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
duke@435 2935 void
duke@435 2936 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 2937 SpaceId space_id,
duke@435 2938 size_t beg_chunk,
duke@435 2939 size_t end_chunk) {
duke@435 2940 ParallelCompactData& sd = summary_data();
duke@435 2941 ParMarkBitMap* const mbm = mark_bitmap();
duke@435 2942
duke@435 2943 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
duke@435 2944 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
duke@435 2945 assert(beg_chunk <= end_chunk, "bad chunk range");
duke@435 2946 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
duke@435 2947
duke@435 2948 #ifdef ASSERT
duke@435 2949 // Claim the chunks to avoid triggering an assert when they are marked as
duke@435 2950 // filled.
duke@435 2951 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
duke@435 2952 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
duke@435 2953 }
duke@435 2954 #endif // #ifdef ASSERT
duke@435 2955
duke@435 2956 if (beg_addr != space(space_id)->bottom()) {
duke@435 2957 // Find the first live object or block of dead space that *starts* in this
duke@435 2958 // range of chunks. If a partial object crosses onto the chunk, skip it; it
duke@435 2959 // will be marked for 'deferred update' when the object head is processed.
duke@435 2960 // If dead space crosses onto the chunk, it is also skipped; it will be
duke@435 2961 // filled when the prior chunk is processed. If neither of those apply, the
duke@435 2962 // first word in the chunk is the start of a live object or dead space.
duke@435 2963 assert(beg_addr > space(space_id)->bottom(), "sanity");
duke@435 2964 const ChunkData* const cp = sd.chunk(beg_chunk);
duke@435 2965 if (cp->partial_obj_size() != 0) {
duke@435 2966 beg_addr = sd.partial_obj_end(beg_chunk);
duke@435 2967 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
duke@435 2968 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
duke@435 2969 }
duke@435 2970 }
duke@435 2971
duke@435 2972 if (beg_addr < end_addr) {
duke@435 2973 // A live object or block of dead space starts in this range of Chunks.
duke@435 2974 HeapWord* const dense_prefix_end = dense_prefix(space_id);
duke@435 2975
duke@435 2976 // Create closures and iterate.
duke@435 2977 UpdateOnlyClosure update_closure(mbm, cm, space_id);
duke@435 2978 FillClosure fill_closure(cm, space_id);
duke@435 2979 ParMarkBitMap::IterationStatus status;
duke@435 2980 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
duke@435 2981 dense_prefix_end);
duke@435 2982 if (status == ParMarkBitMap::incomplete) {
duke@435 2983 update_closure.do_addr(update_closure.source());
duke@435 2984 }
duke@435 2985 }
duke@435 2986
duke@435 2987 // Mark the chunks as filled.
duke@435 2988 ChunkData* const beg_cp = sd.chunk(beg_chunk);
duke@435 2989 ChunkData* const end_cp = sd.chunk(end_chunk);
duke@435 2990 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
duke@435 2991 cp->set_completed();
duke@435 2992 }
duke@435 2993 }
duke@435 2994
duke@435 2995 // Return the SpaceId for the space containing addr. If addr is not in the
duke@435 2996 // heap, last_space_id is returned. In debug mode it expects the address to be
duke@435 2997 // in the heap and asserts such.
duke@435 2998 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
duke@435 2999 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
duke@435 3000
duke@435 3001 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 3002 if (_space_info[id].space()->contains(addr)) {
duke@435 3003 return SpaceId(id);
duke@435 3004 }
duke@435 3005 }
duke@435 3006
duke@435 3007 assert(false, "no space contains the addr");
duke@435 3008 return last_space_id;
duke@435 3009 }
duke@435 3010
duke@435 3011 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
duke@435 3012 SpaceId id) {
duke@435 3013 assert(id < last_space_id, "bad space id");
duke@435 3014
duke@435 3015 ParallelCompactData& sd = summary_data();
duke@435 3016 const SpaceInfo* const space_info = _space_info + id;
duke@435 3017 ObjectStartArray* const start_array = space_info->start_array();
duke@435 3018
duke@435 3019 const MutableSpace* const space = space_info->space();
duke@435 3020 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
duke@435 3021 HeapWord* const beg_addr = space_info->dense_prefix();
duke@435 3022 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());
duke@435 3023
duke@435 3024 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
duke@435 3025 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
duke@435 3026 const ChunkData* cur_chunk;
duke@435 3027 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
duke@435 3028 HeapWord* const addr = cur_chunk->deferred_obj_addr();
duke@435 3029 if (addr != NULL) {
duke@435 3030 if (start_array != NULL) {
duke@435 3031 start_array->allocate_block(addr);
duke@435 3032 }
duke@435 3033 oop(addr)->update_contents(cm);
duke@435 3034 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
duke@435 3035 }
duke@435 3036 }
duke@435 3037 }
duke@435 3038
duke@435 3039 // Skip over count live words starting from beg, and return the address of the
duke@435 3040 // next live word. Unless marked, the word corresponding to beg is assumed to
duke@435 3041 // be dead. Callers must either ensure beg does not correspond to the middle of
duke@435 3042 // an object, or account for those live words in some other way. Callers must
duke@435 3043 // also ensure that there are enough live words in the range [beg, end) to skip.
duke@435 3044 HeapWord*
duke@435 3045 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
duke@435 3046 {
duke@435 3047 assert(count > 0, "sanity");
duke@435 3048
duke@435 3049 ParMarkBitMap* m = mark_bitmap();
duke@435 3050 idx_t bits_to_skip = m->words_to_bits(count);
duke@435 3051 idx_t cur_beg = m->addr_to_bit(beg);
duke@435 3052 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
duke@435 3053
duke@435 3054 do {
duke@435 3055 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 3056 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
duke@435 3057 const size_t obj_bits = cur_end - cur_beg + 1;
duke@435 3058 if (obj_bits > bits_to_skip) {
duke@435 3059 return m->bit_to_addr(cur_beg + bits_to_skip);
duke@435 3060 }
duke@435 3061 bits_to_skip -= obj_bits;
duke@435 3062 cur_beg = cur_end + 1;
duke@435 3063 } while (bits_to_skip > 0);
duke@435 3064
duke@435 3065 // Skipping the desired number of words landed just past the end of an object.
duke@435 3066 // Find the start of the next object.
duke@435 3067 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 3068 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
duke@435 3069 return m->bit_to_addr(cur_beg);
duke@435 3070 }
duke@435 3071
duke@435 3072 HeapWord*
duke@435 3073 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
duke@435 3074 size_t src_chunk_idx)
duke@435 3075 {
duke@435 3076 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3077 const ParallelCompactData& sd = summary_data();
duke@435 3078 const size_t ChunkSize = ParallelCompactData::ChunkSize;
duke@435 3079
duke@435 3080 assert(sd.is_chunk_aligned(dest_addr), "not aligned");
duke@435 3081
duke@435 3082 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
duke@435 3083 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
duke@435 3084 HeapWord* const src_chunk_destination = src_chunk_ptr->destination();
duke@435 3085
duke@435 3086 assert(dest_addr >= src_chunk_destination, "wrong src chunk");
duke@435 3087 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");
duke@435 3088
duke@435 3089 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
duke@435 3090 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;
duke@435 3091
duke@435 3092 HeapWord* addr = src_chunk_beg;
duke@435 3093 if (dest_addr == src_chunk_destination) {
duke@435 3094 // Return the first live word in the source chunk.
duke@435 3095 if (partial_obj_size == 0) {
duke@435 3096 addr = bitmap->find_obj_beg(addr, src_chunk_end);
duke@435 3097 assert(addr < src_chunk_end, "no objects start in src chunk");
duke@435 3098 }
duke@435 3099 return addr;
duke@435 3100 }
duke@435 3101
duke@435 3102 // Must skip some live data.
duke@435 3103 size_t words_to_skip = dest_addr - src_chunk_destination;
duke@435 3104 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");
duke@435 3105
duke@435 3106 if (partial_obj_size >= words_to_skip) {
duke@435 3107 // All the live words to skip are part of the partial object.
duke@435 3108 addr += words_to_skip;
duke@435 3109 if (partial_obj_size == words_to_skip) {
duke@435 3110 // Find the first live word past the partial object.
duke@435 3111 addr = bitmap->find_obj_beg(addr, src_chunk_end);
duke@435 3112 assert(addr < src_chunk_end, "wrong src chunk");
duke@435 3113 }
duke@435 3114 return addr;
duke@435 3115 }
duke@435 3116
duke@435 3117 // Skip over the partial object (if any).
duke@435 3118 if (partial_obj_size != 0) {
duke@435 3119 words_to_skip -= partial_obj_size;
duke@435 3120 addr += partial_obj_size;
duke@435 3121 }
duke@435 3122
duke@435 3123 // Skip over live words due to objects that start in the chunk.
duke@435 3124 addr = skip_live_words(addr, src_chunk_end, words_to_skip);
duke@435 3125 assert(addr < src_chunk_end, "wrong src chunk");
duke@435 3126 return addr;
duke@435 3127 }
duke@435 3128
duke@435 3129 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
duke@435 3130 size_t beg_chunk,
duke@435 3131 HeapWord* end_addr)
duke@435 3132 {
duke@435 3133 ParallelCompactData& sd = summary_data();
duke@435 3134 ChunkData* const beg = sd.chunk(beg_chunk);
duke@435 3135 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
duke@435 3136 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
duke@435 3137 size_t cur_idx = beg_chunk;
duke@435 3138 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
duke@435 3139 assert(cur->data_size() > 0, "chunk must have live data");
duke@435 3140 cur->decrement_destination_count();
duke@435 3141 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
duke@435 3142 cm->save_for_processing(cur_idx);
duke@435 3143 }
duke@435 3144 }
duke@435 3145 }
duke@435 3146
duke@435 3147 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
duke@435 3148 SpaceId& src_space_id,
duke@435 3149 HeapWord*& src_space_top,
duke@435 3150 HeapWord* end_addr)
duke@435 3151 {
duke@435 3152 typedef ParallelCompactData::ChunkData ChunkData;
duke@435 3153
duke@435 3154 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3155 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 3156
duke@435 3157 size_t src_chunk_idx = 0;
duke@435 3158
duke@435 3159 // Skip empty chunks (if any) up to the top of the space.
duke@435 3160 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
duke@435 3161 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
duke@435 3162 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
duke@435 3163 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 3164 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
duke@435 3165 ++src_chunk_ptr;
duke@435 3166 }
duke@435 3167
duke@435 3168 if (src_chunk_ptr < top_chunk_ptr) {
duke@435 3169 // The next source chunk is in the current space. Update src_chunk_idx and
duke@435 3170 // the source address to match src_chunk_ptr.
duke@435 3171 src_chunk_idx = sd.chunk(src_chunk_ptr);
duke@435 3172 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
duke@435 3173 if (src_chunk_addr > closure.source()) {
duke@435 3174 closure.set_source(src_chunk_addr);
duke@435 3175 }
duke@435 3176 return src_chunk_idx;
duke@435 3177 }
duke@435 3178
duke@435 3179 // Switch to a new source space and find the first non-empty chunk.
duke@435 3180 unsigned int space_id = src_space_id + 1;
duke@435 3181 assert(space_id < last_space_id, "not enough spaces");
duke@435 3182
duke@435 3183 HeapWord* const destination = closure.destination();
duke@435 3184
duke@435 3185 do {
duke@435 3186 MutableSpace* space = _space_info[space_id].space();
duke@435 3187 HeapWord* const bottom = space->bottom();
duke@435 3188 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);
duke@435 3189
duke@435 3190 // Iterate over the spaces that do not compact into themselves.
duke@435 3191 if (bottom_cp->destination() != bottom) {
duke@435 3192 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
duke@435 3193 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 3194
duke@435 3195 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
duke@435 3196 if (src_cp->live_obj_size() > 0) {
duke@435 3197 // Found it.
duke@435 3198 assert(src_cp->destination() == destination,
duke@435 3199 "first live obj in the space must match the destination");
duke@435 3200 assert(src_cp->partial_obj_size() == 0,
duke@435 3201 "a space cannot begin with a partial obj");
duke@435 3202
duke@435 3203 src_space_id = SpaceId(space_id);
duke@435 3204 src_space_top = space->top();
duke@435 3205 const size_t src_chunk_idx = sd.chunk(src_cp);
duke@435 3206 closure.set_source(sd.chunk_to_addr(src_chunk_idx));
duke@435 3207 return src_chunk_idx;
duke@435 3208 } else {
duke@435 3209 assert(src_cp->data_size() == 0, "sanity");
duke@435 3210 }
duke@435 3211 }
duke@435 3212 }
duke@435 3213 } while (++space_id < last_space_id);
duke@435 3214
duke@435 3215 assert(false, "no source chunk was found");
duke@435 3216 return 0;
duke@435 3217 }
duke@435 3218
duke@435 3219 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
duke@435 3220 {
duke@435 3221 typedef ParMarkBitMap::IterationStatus IterationStatus;
duke@435 3222 const size_t ChunkSize = ParallelCompactData::ChunkSize;
duke@435 3223 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3224 ParallelCompactData& sd = summary_data();
duke@435 3225 ChunkData* const chunk_ptr = sd.chunk(chunk_idx);
duke@435 3226
duke@435 3227 // Get the items needed to construct the closure.
duke@435 3228 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
duke@435 3229 SpaceId dest_space_id = space_id(dest_addr);
duke@435 3230 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
duke@435 3231 HeapWord* new_top = _space_info[dest_space_id].new_top();
duke@435 3232 assert(dest_addr < new_top, "sanity");
duke@435 3233 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);
duke@435 3234
duke@435 3235 // Get the source chunk and related info.
duke@435 3236 size_t src_chunk_idx = chunk_ptr->source_chunk();
duke@435 3237 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
duke@435 3238 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
duke@435 3239
duke@435 3240 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 3241 closure.set_source(first_src_addr(dest_addr, src_chunk_idx));
duke@435 3242
duke@435 3243 // Adjust src_chunk_idx to prepare for decrementing destination counts (the
duke@435 3244 // destination count is not decremented when a chunk is copied to itself).
duke@435 3245 if (src_chunk_idx == chunk_idx) {
duke@435 3246 src_chunk_idx += 1;
duke@435 3247 }
duke@435 3248
duke@435 3249 if (bitmap->is_unmarked(closure.source())) {
duke@435 3250 // The first source word is in the middle of an object; copy the remainder
duke@435 3251 // of the object or as much as will fit. The fact that pointer updates were
duke@435 3252 // deferred will be noted when the object header is processed.
duke@435 3253 HeapWord* const old_src_addr = closure.source();
duke@435 3254 closure.copy_partial_obj();
duke@435 3255 if (closure.is_full()) {
duke@435 3256 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3257 chunk_ptr->set_deferred_obj_addr(NULL);
duke@435 3258 chunk_ptr->set_completed();
duke@435 3259 return;
duke@435 3260 }
duke@435 3261
duke@435 3262 HeapWord* const end_addr = sd.chunk_align_down(closure.source());
duke@435 3263 if (sd.chunk_align_down(old_src_addr) != end_addr) {
duke@435 3264 // The partial object was copied from more than one source chunk.
duke@435 3265 decrement_destination_counts(cm, src_chunk_idx, end_addr);
duke@435 3266
duke@435 3267 // Move to the next source chunk, possibly switching spaces as well. All
duke@435 3268 // args except end_addr may be modified.
duke@435 3269 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
duke@435 3270 end_addr);
duke@435 3271 }
duke@435 3272 }
duke@435 3273
duke@435 3274 do {
duke@435 3275 HeapWord* const cur_addr = closure.source();
duke@435 3276 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
duke@435 3277 src_space_top);
duke@435 3278 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
duke@435 3279
duke@435 3280 if (status == ParMarkBitMap::incomplete) {
duke@435 3281 // The last obj that starts in the source chunk does not end in the chunk.
duke@435 3282 assert(closure.source() < end_addr, "sanity")
duke@435 3283 HeapWord* const obj_beg = closure.source();
duke@435 3284 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
duke@435 3285 src_space_top);
duke@435 3286 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
duke@435 3287 if (obj_end < range_end) {
duke@435 3288 // The end was found; the entire object will fit.
duke@435 3289 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
duke@435 3290 assert(status != ParMarkBitMap::would_overflow, "sanity");
duke@435 3291 } else {
duke@435 3292 // The end was not found; the object will not fit.
duke@435 3293 assert(range_end < src_space_top, "obj cannot cross space boundary");
duke@435 3294 status = ParMarkBitMap::would_overflow;
duke@435 3295 }
duke@435 3296 }
duke@435 3297
duke@435 3298 if (status == ParMarkBitMap::would_overflow) {
duke@435 3299 // The last object did not fit. Note that interior oop updates were
duke@435 3300 // deferred, then copy enough of the object to fill the chunk.
duke@435 3301 chunk_ptr->set_deferred_obj_addr(closure.destination());
duke@435 3302 status = closure.copy_until_full(); // copies from closure.source()
duke@435 3303
duke@435 3304 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3305 chunk_ptr->set_completed();
duke@435 3306 return;
duke@435 3307 }
duke@435 3308
duke@435 3309 if (status == ParMarkBitMap::full) {
duke@435 3310 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3311 chunk_ptr->set_deferred_obj_addr(NULL);
duke@435 3312 chunk_ptr->set_completed();
duke@435 3313 return;
duke@435 3314 }
duke@435 3315
duke@435 3316 decrement_destination_counts(cm, src_chunk_idx, end_addr);
duke@435 3317
duke@435 3318 // Move to the next source chunk, possibly switching spaces as well. All
duke@435 3319 // args except end_addr may be modified.
duke@435 3320 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
duke@435 3321 end_addr);
duke@435 3322 } while (true);
duke@435 3323 }
duke@435 3324
duke@435 3325 void
duke@435 3326 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
duke@435 3327 const MutableSpace* sp = space(space_id);
duke@435 3328 if (sp->is_empty()) {
duke@435 3329 return;
duke@435 3330 }
duke@435 3331
duke@435 3332 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3333 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3334 HeapWord* const dp_addr = dense_prefix(space_id);
duke@435 3335 HeapWord* beg_addr = sp->bottom();
duke@435 3336 HeapWord* end_addr = sp->top();
duke@435 3337
duke@435 3338 #ifdef ASSERT
duke@435 3339 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
duke@435 3340 if (cm->should_verify_only()) {
duke@435 3341 VerifyUpdateClosure verify_update(cm, sp);
duke@435 3342 bitmap->iterate(&verify_update, beg_addr, end_addr);
duke@435 3343 return;
duke@435 3344 }
duke@435 3345
duke@435 3346 if (cm->should_reset_only()) {
duke@435 3347 ResetObjectsClosure reset_objects(cm);
duke@435 3348 bitmap->iterate(&reset_objects, beg_addr, end_addr);
duke@435 3349 return;
duke@435 3350 }
duke@435 3351 #endif
duke@435 3352
duke@435 3353 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
duke@435 3354 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
duke@435 3355 if (beg_chunk < dp_chunk) {
duke@435 3356 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
duke@435 3357 }
duke@435 3358
duke@435 3359 // The destination of the first live object that starts in the chunk is one
duke@435 3360 // past the end of the partial object entering the chunk (if any).
duke@435 3361 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
duke@435 3362 HeapWord* const new_top = _space_info[space_id].new_top();
duke@435 3363 assert(new_top >= dest_addr, "bad new_top value");
duke@435 3364 const size_t words = pointer_delta(new_top, dest_addr);
duke@435 3365
duke@435 3366 if (words > 0) {
duke@435 3367 ObjectStartArray* start_array = _space_info[space_id].start_array();
duke@435 3368 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 3369
duke@435 3370 ParMarkBitMap::IterationStatus status;
duke@435 3371 status = bitmap->iterate(&closure, dest_addr, end_addr);
duke@435 3372 assert(status == ParMarkBitMap::full, "iteration not complete");
duke@435 3373 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
duke@435 3374 "live objects skipped because closure is full");
duke@435 3375 }
duke@435 3376 }
duke@435 3377
duke@435 3378 jlong PSParallelCompact::millis_since_last_gc() {
duke@435 3379 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
duke@435 3380 // XXX See note in genCollectedHeap::millis_since_last_gc().
duke@435 3381 if (ret_val < 0) {
duke@435 3382 NOT_PRODUCT(warning("time warp: %d", ret_val);)
duke@435 3383 return 0;
duke@435 3384 }
duke@435 3385 return ret_val;
duke@435 3386 }
duke@435 3387
duke@435 3388 void PSParallelCompact::reset_millis_since_last_gc() {
duke@435 3389 _time_of_last_gc = os::javaTimeMillis();
duke@435 3390 }
duke@435 3391
duke@435 3392 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
duke@435 3393 {
duke@435 3394 if (source() != destination()) {
duke@435 3395 assert(source() > destination(), "must copy to the left");
duke@435 3396 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
duke@435 3397 }
duke@435 3398 update_state(words_remaining());
duke@435 3399 assert(is_full(), "sanity");
duke@435 3400 return ParMarkBitMap::full;
duke@435 3401 }
duke@435 3402
duke@435 3403 void MoveAndUpdateClosure::copy_partial_obj()
duke@435 3404 {
duke@435 3405 size_t words = words_remaining();
duke@435 3406
duke@435 3407 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
duke@435 3408 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
duke@435 3409 if (end_addr < range_end) {
duke@435 3410 words = bitmap()->obj_size(source(), end_addr);
duke@435 3411 }
duke@435 3412
duke@435 3413 // This test is necessary; if omitted, the pointer updates to a partial object
duke@435 3414 // that crosses the dense prefix boundary could be overwritten.
duke@435 3415 if (source() != destination()) {
duke@435 3416 assert(source() > destination(), "must copy to the left");
duke@435 3417 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 3418 }
duke@435 3419 update_state(words);
duke@435 3420 }
duke@435 3421
duke@435 3422 ParMarkBitMapClosure::IterationStatus
duke@435 3423 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3424 assert(destination() != NULL, "sanity");
duke@435 3425 assert(bitmap()->obj_size(addr) == words, "bad size");
duke@435 3426
duke@435 3427 _source = addr;
duke@435 3428 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
duke@435 3429 destination(), "wrong destination");
duke@435 3430
duke@435 3431 if (words > words_remaining()) {
duke@435 3432 return ParMarkBitMap::would_overflow;
duke@435 3433 }
duke@435 3434
duke@435 3435 // The start_array must be updated even if the object is not moving.
duke@435 3436 if (_start_array != NULL) {
duke@435 3437 _start_array->allocate_block(destination());
duke@435 3438 }
duke@435 3439
duke@435 3440 if (destination() != source()) {
duke@435 3441 assert(destination() < source(), "must copy to the left");
duke@435 3442 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 3443 }
duke@435 3444
duke@435 3445 oop moved_oop = (oop) destination();
duke@435 3446 moved_oop->update_contents(compaction_manager());
duke@435 3447 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
duke@435 3448
duke@435 3449 update_state(words);
duke@435 3450 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
duke@435 3451 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
duke@435 3452 }
duke@435 3453
duke@435 3454 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 3455 ParCompactionManager* cm,
duke@435 3456 PSParallelCompact::SpaceId space_id) :
duke@435 3457 ParMarkBitMapClosure(mbm, cm),
duke@435 3458 _space_id(space_id),
duke@435 3459 _start_array(PSParallelCompact::start_array(space_id))
duke@435 3460 {
duke@435 3461 }
duke@435 3462
duke@435 3463 // Updates the references in the object to their new values.
duke@435 3464 ParMarkBitMapClosure::IterationStatus
duke@435 3465 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3466 do_addr(addr);
duke@435 3467 return ParMarkBitMap::incomplete;
duke@435 3468 }
duke@435 3469
duke@435 3470 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
duke@435 3471 ParCompactionManager* cm,
duke@435 3472 size_t chunk_index) :
duke@435 3473 ParMarkBitMapClosure(mbm, cm),
duke@435 3474 _live_data_left(0),
duke@435 3475 _cur_block(0) {
duke@435 3476 _chunk_start =
duke@435 3477 PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
duke@435 3478 _chunk_end =
duke@435 3479 PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
duke@435 3480 ParallelCompactData::ChunkSize;
duke@435 3481 _chunk_index = chunk_index;
duke@435 3482 _cur_block =
duke@435 3483 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
duke@435 3484 }
duke@435 3485
duke@435 3486 bool BitBlockUpdateClosure::chunk_contains_cur_block() {
duke@435 3487 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
duke@435 3488 }
duke@435 3489
duke@435 3490 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
duke@435 3491 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
duke@435 3492 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3493 _chunk_index = chunk_index;
duke@435 3494 _live_data_left = 0;
duke@435 3495 _chunk_start = sd.chunk_to_addr(chunk_index);
duke@435 3496 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;
duke@435 3497
duke@435 3498 // The first block in this chunk
duke@435 3499 size_t first_block = sd.addr_to_block_idx(_chunk_start);
duke@435 3500 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();
duke@435 3501
duke@435 3502 // Set the offset to 0. By definition it should have that value
duke@435 3503 // but it may have been written while processing an earlier chunk.
duke@435 3504 if (partial_live_size == 0) {
duke@435 3505 // No live object extends onto the chunk. The first bit
duke@435 3506 // in the bit map for the first chunk must be a start bit.
duke@435 3507 // Although there may not be any marked bits, it is safe
duke@435 3508 // to set it as a start bit.
duke@435 3509 sd.block(first_block)->set_start_bit_offset(0);
duke@435 3510 sd.block(first_block)->set_first_is_start_bit(true);
duke@435 3511 } else if (sd.partial_obj_ends_in_block(first_block)) {
duke@435 3512 sd.block(first_block)->set_end_bit_offset(0);
duke@435 3513 sd.block(first_block)->set_first_is_start_bit(false);
duke@435 3514 } else {
duke@435 3515 // The partial object extends beyond the first block.
duke@435 3516 // There is no object starting in the first block
duke@435 3517 // so the offset and bit parity are not needed.
duke@435 3518 // Set the the bit parity to start bit so assertions
duke@435 3519 // work when not bit is found.
duke@435 3520 sd.block(first_block)->set_end_bit_offset(0);
duke@435 3521 sd.block(first_block)->set_first_is_start_bit(false);
duke@435 3522 }
duke@435 3523 _cur_block = first_block;
duke@435 3524 #ifdef ASSERT
duke@435 3525 if (sd.block(first_block)->first_is_start_bit()) {
duke@435 3526 assert(!sd.partial_obj_ends_in_block(first_block),
duke@435 3527 "Partial object cannot end in first block");
duke@435 3528 }
duke@435 3529
duke@435 3530 if (PrintGCDetails && Verbose) {
duke@435 3531 if (partial_live_size == 1) {
duke@435 3532 gclog_or_tty->print_cr("first_block " PTR_FORMAT
duke@435 3533 " _offset " PTR_FORMAT
duke@435 3534 " _first_is_start_bit %d",
duke@435 3535 first_block,
duke@435 3536 sd.block(first_block)->raw_offset(),
duke@435 3537 sd.block(first_block)->first_is_start_bit());
duke@435 3538 }
duke@435 3539 }
duke@435 3540 #endif
duke@435 3541 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);)
duke@435 3542 }
duke@435 3543
duke@435 3544 // This method is called when a object has been found (both beginning
duke@435 3545 // and end of the object) in the range of iteration. This method is
duke@435 3546 // calculating the words of live data to the left of a block. That live
duke@435 3547 // data includes any object starting to the left of the block (i.e.,
duke@435 3548 // the live-data-to-the-left of block AAA will include the full size
duke@435 3549 // of any object entering AAA).
duke@435 3550
duke@435 3551 ParMarkBitMapClosure::IterationStatus
duke@435 3552 BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3553 // add the size to the block data.
duke@435 3554 HeapWord* obj = addr;
duke@435 3555 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3556
duke@435 3557 assert(bitmap()->obj_size(obj) == words, "bad size");
duke@435 3558 assert(_chunk_start <= obj, "object is not in chunk");
duke@435 3559 assert(obj + words <= _chunk_end, "object is not in chunk");
duke@435 3560
duke@435 3561 // Update the live data to the left
duke@435 3562 size_t prev_live_data_left = _live_data_left;
duke@435 3563 _live_data_left = _live_data_left + words;
duke@435 3564
duke@435 3565 // Is this object in the current block.
duke@435 3566 size_t block_of_obj = sd.addr_to_block_idx(obj);
duke@435 3567 size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1);
duke@435 3568 HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last);
duke@435 3569 if (_cur_block < block_of_obj) {
duke@435 3570
duke@435 3571 //
duke@435 3572 // No object crossed the block boundary and this object was found
duke@435 3573 // on the other side of the block boundary. Update the offset for
duke@435 3574 // the new block with the data size that does not include this object.
duke@435 3575 //
duke@435 3576 // The first bit in block_of_obj is a start bit except in the
duke@435 3577 // case where the partial object for the chunk extends into
duke@435 3578 // this block.
duke@435 3579 if (sd.partial_obj_ends_in_block(block_of_obj)) {
duke@435 3580 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
duke@435 3581 } else {
duke@435 3582 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
duke@435 3583 }
duke@435 3584
duke@435 3585 // Does this object pass beyond the its block?
duke@435 3586 if (block_of_obj < block_of_obj_last) {
duke@435 3587 // Object crosses block boundary. Two blocks need to be udpated:
duke@435 3588 // the current block where the object started
duke@435 3589 // the block where the object ends
duke@435 3590 //
duke@435 3591 // The offset for blocks with no objects starting in them
duke@435 3592 // (e.g., blocks between _cur_block and block_of_obj_last)
duke@435 3593 // should not be needed.
duke@435 3594 // Note that block_of_obj_last may be in another chunk. If so,
duke@435 3595 // it should be overwritten later. This is a problem (writting
duke@435 3596 // into a block in a later chunk) for parallel execution.
duke@435 3597 assert(obj < block_of_obj_last_addr,
duke@435 3598 "Object should start in previous block");
duke@435 3599
duke@435 3600 // obj is crossing into block_of_obj_last so the first bit
duke@435 3601 // is and end bit.
duke@435 3602 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
duke@435 3603
duke@435 3604 _cur_block = block_of_obj_last;
duke@435 3605 } else {
duke@435 3606 // _first_is_start_bit has already been set correctly
duke@435 3607 // in the if-then-else above so don't reset it here.
duke@435 3608 _cur_block = block_of_obj;
duke@435 3609 }
duke@435 3610 } else {
duke@435 3611 // The current block only changes if the object extends beyound
duke@435 3612 // the block it starts in.
duke@435 3613 //
duke@435 3614 // The object starts in the current block.
duke@435 3615 // Does this object pass beyond the end of it?
duke@435 3616 if (block_of_obj < block_of_obj_last) {
duke@435 3617 // Object crosses block boundary.
duke@435 3618 // See note above on possible blocks between block_of_obj and
duke@435 3619 // block_of_obj_last
duke@435 3620 assert(obj < block_of_obj_last_addr,
duke@435 3621 "Object should start in previous block");
duke@435 3622
duke@435 3623 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
duke@435 3624
duke@435 3625 _cur_block = block_of_obj_last;
duke@435 3626 }
duke@435 3627 }
duke@435 3628
duke@435 3629 // Return incomplete if there are more blocks to be done.
duke@435 3630 if (chunk_contains_cur_block()) {
duke@435 3631 return ParMarkBitMap::incomplete;
duke@435 3632 }
duke@435 3633 return ParMarkBitMap::complete;
duke@435 3634 }
duke@435 3635
duke@435 3636 // Verify the new location using the forwarding pointer
duke@435 3637 // from MarkSweep::mark_sweep_phase2(). Set the mark_word
duke@435 3638 // to the initial value.
duke@435 3639 ParMarkBitMapClosure::IterationStatus
duke@435 3640 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3641 // The second arg (words) is not used.
duke@435 3642 oop obj = (oop) addr;
duke@435 3643 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
duke@435 3644 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
duke@435 3645 if (forwarding_ptr == NULL) {
duke@435 3646 // The object is dead or not moving.
duke@435 3647 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
duke@435 3648 "Object liveness is wrong.");
duke@435 3649 return ParMarkBitMap::incomplete;
duke@435 3650 }
duke@435 3651 assert(UseParallelOldGCDensePrefix ||
duke@435 3652 (HeapMaximumCompactionInterval > 1) ||
duke@435 3653 (MarkSweepAlwaysCompactCount > 1) ||
duke@435 3654 (forwarding_ptr == new_pointer),
duke@435 3655 "Calculation of new location is incorrect");
duke@435 3656 return ParMarkBitMap::incomplete;
duke@435 3657 }
duke@435 3658
duke@435 3659 // Reset objects modified for debug checking.
duke@435 3660 ParMarkBitMapClosure::IterationStatus
duke@435 3661 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3662 // The second arg (words) is not used.
duke@435 3663 oop obj = (oop) addr;
duke@435 3664 obj->init_mark();
duke@435 3665 return ParMarkBitMap::incomplete;
duke@435 3666 }
duke@435 3667
duke@435 3668 // Prepare for compaction. This method is executed once
duke@435 3669 // (i.e., by a single thread) before compaction.
duke@435 3670 // Save the updated location of the intArrayKlassObj for
duke@435 3671 // filling holes in the dense prefix.
duke@435 3672 void PSParallelCompact::compact_prologue() {
duke@435 3673 _updated_int_array_klass_obj = (klassOop)
duke@435 3674 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
duke@435 3675 }
duke@435 3676
duke@435 3677 // The initial implementation of this method created a field
duke@435 3678 // _next_compaction_space_id in SpaceInfo and initialized
duke@435 3679 // that field in SpaceInfo::initialize_space_info(). That
duke@435 3680 // required that _next_compaction_space_id be declared a
duke@435 3681 // SpaceId in SpaceInfo and that would have required that
duke@435 3682 // either SpaceId be declared in a separate class or that
duke@435 3683 // it be declared in SpaceInfo. It didn't seem consistent
duke@435 3684 // to declare it in SpaceInfo (didn't really fit logically).
duke@435 3685 // Alternatively, defining a separate class to define SpaceId
duke@435 3686 // seem excessive. This implementation is simple and localizes
duke@435 3687 // the knowledge.
duke@435 3688
duke@435 3689 PSParallelCompact::SpaceId
duke@435 3690 PSParallelCompact::next_compaction_space_id(SpaceId id) {
duke@435 3691 assert(id < last_space_id, "id out of range");
duke@435 3692 switch (id) {
duke@435 3693 case perm_space_id :
duke@435 3694 return last_space_id;
duke@435 3695 case old_space_id :
duke@435 3696 return eden_space_id;
duke@435 3697 case eden_space_id :
duke@435 3698 return from_space_id;
duke@435 3699 case from_space_id :
duke@435 3700 return to_space_id;
duke@435 3701 case to_space_id :
duke@435 3702 return last_space_id;
duke@435 3703 default:
duke@435 3704 assert(false, "Bad space id");
duke@435 3705 return last_space_id;
duke@435 3706 }
duke@435 3707 }
duke@435 3708
duke@435 3709 // Here temporarily for debugging
duke@435 3710 #ifdef ASSERT
duke@435 3711 size_t ParallelCompactData::block_idx(BlockData* block) {
duke@435 3712 size_t index = pointer_delta(block,
duke@435 3713 PSParallelCompact::summary_data()._block_data, sizeof(BlockData));
duke@435 3714 return index;
duke@435 3715 }
duke@435 3716 #endif

mercurial