src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

author:      jcoomes
date:        Fri, 28 Mar 2008 23:35:42 -0700
changeset:   514:82db0859acbe
parent:      435:a61af66fc99e
child:       548:ba764ed4b6f2
child:       575:3febac328d82
permissions: -rw-r--r--

6642862: Code cache allocation fails with large pages after 6588638
Reviewed-by: apetrusenko

/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psParallelCompact.cpp.incl"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words
const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize;
const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;

// 32-bit: 128 words covers 4 bitmap words
// 64-bit: 128 words covers 2 bitmap words
const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask;

const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
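
// Worked example of the constants above, assuming a 64-bit VM (where
// LogHeapWordSize == 3, i.e., 8-byte HeapWords):
//   ChunkSize           = 1 << 9   = 512 words
//   ChunkSizeBytes      = 512 << 3 = 4096 bytes
//   ChunkSizeOffsetMask = 0x1ff  (word offset within a chunk)
//   ChunkAddrOffsetMask = 0xfff  (byte offset within a chunk)
//   BlocksPerChunk      = 512 / 128 = 4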

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_shift = 27;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::los_mask = ~dc_mask;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::ChunkData::chunk_sz_t
ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
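
// Layout implied by the constants above: a chunk_sz_t is a 32-bit word whose
// high 5 bits (dc_mask, bits 31..27) hold the destination count and claim
// state, while the low 27 bits (los_mask) hold the live-object size in words.
// For illustration:
//   dc_one       == 0x08000000  (destination count of one)
//   dc_claimed   == 0x40000000
//   dc_completed == 0x60000000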

#ifdef ASSERT
short ParallelCompactData::BlockData::_cur_phase = 0;
#endif

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
bool      PSParallelCompact::_print_phases = false;

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
klassOop            PSParallelCompact::_updated_int_array_klass_obj = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

#ifdef VALIDATE_MARK_SWEEP
GrowableArray<oop*>*   PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t                 PSParallelCompact::_live_oops_index = 0;
size_t                 PSParallelCompact::_live_oops_index_at_perm = 0;
GrowableArray<oop*>*   PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<oop*>*   PSParallelCompact::_adjusted_pointers = NULL;
bool                   PSParallelCompact::_pointer_tracking = false;
bool                   PSParallelCompact::_root_tracking = true;

GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_cur_gc_live_oops_size = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_last_gc_live_oops_size = NULL;
#endif

// XXX beg - verification code; only works while we also mark in object headers
static void
verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();

  PSPermGen* perm_gen = heap->perm_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* perm_space = perm_gen->object_space();
  MutableSpace* old_space = old_gen->object_space();
  MutableSpace* eden_space = young_gen->eden_space();
  MutableSpace* from_space = young_gen->from_space();
  MutableSpace* to_space = young_gen->to_space();

  // 'from_space' here is the survivor space at the lower address.
  if (to_space->bottom() < from_space->bottom()) {
    from_space = to_space;
    to_space = young_gen->from_space();
  }

  HeapWord* boundaries[12];
  unsigned int bidx = 0;
  const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);

  boundaries[0] = perm_space->bottom();
  boundaries[1] = perm_space->top();
  boundaries[2] = old_space->bottom();
  boundaries[3] = old_space->top();
  boundaries[4] = eden_space->bottom();
  boundaries[5] = eden_space->top();
  boundaries[6] = from_space->bottom();
  boundaries[7] = from_space->top();
  boundaries[8] = to_space->bottom();
  boundaries[9] = to_space->top();
  boundaries[10] = to_space->end();
  boundaries[11] = to_space->end();

  BitMap::idx_t beg_bit = 0;
  BitMap::idx_t end_bit;
  BitMap::idx_t tmp_bit;
  const BitMap::idx_t last_bit = _mark_bitmap.size();
  do {
    HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
    if (_mark_bitmap.is_marked(beg_bit)) {
      oop obj = (oop)addr;
      assert(obj->is_gc_marked(), "obj header is not marked");
      end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
      const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
      assert(size == (size_t)obj->size(), "end bit wrong?");
      beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
      assert(beg_bit > end_bit, "bit set in middle of an obj");
    } else {
      if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
        // a dead object in the current space.
        oop obj = (oop)addr;
        end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
        assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
        tmp_bit = beg_bit + 1;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set in unmarked obj");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set in unmarked obj");
      } else if (addr < boundaries[bidx + 2]) {
        // addr is between top in the current space and bottom in the next.
        end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "beg bit set above top");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
        assert(beg_bit == end_bit, "end bit set above top");
        bidx += 2;
      } else if (bidx < bidx_max - 2) {
        bidx += 2; // ???
      } else {
        tmp_bit = beg_bit;
        beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "beg bit set outside heap");
        beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
        assert(beg_bit == last_bit, "end bit set outside heap");
      }
    }
  } while (beg_bit < last_bit);
}
// XXX end - verification code; only works while we also mark in object headers

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "perm", "old ", "eden", "from", "to "
};

void PSParallelCompact::print_chunk_ranges()
{
  tty->print_cr("space  bottom     top        end        new_top");
  tty->print_cr("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    tty->print_cr("%u %s "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
                  id, space_names[id],
                  summary_data().addr_to_chunk_idx(space->bottom()),
                  summary_data().addr_to_chunk_idx(space->top()),
                  summary_data().addr_to_chunk_idx(space->end()),
                  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
{
#define CHUNK_IDX_FORMAT  SIZE_FORMAT_W("7")
#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
  tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_IDX_FORMAT " " PTR_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
                CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
                i, c->data_location(), dci, c->destination(),
                c->partial_obj_size(), c->live_obj_size(),
                c->data_size(), c->source_chunk(), c->destination_count());

#undef CHUNK_IDX_FORMAT
#undef CHUNK_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_chunk_idx(beg_addr);
  const size_t last = summary_data.addr_to_chunk_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::ChunkData* c = summary_data.chunk(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_chunk(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_chunk(size_t i,
                            const ParallelCompactData::ChunkData* c,
                            bool newline = true)
{
  tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
             i, c->destination(),
             c->partial_obj_size(), c->live_obj_size(),
             c->data_size(), c->source_chunk(), c->destination_count());
  if (newline) tty->cr();
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t chunk_size = ParallelCompactData::ChunkSize;
  HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
  const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
  const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full chunks at the beginning of the space.
  size_t full_chunk_count = 0;
  size_t i = summary_data.addr_to_chunk_idx(space->bottom());
  while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
    ++full_chunk_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_chunk_count * chunk_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_chunk = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for chunks while there is something live in
  // the chunk or to the right of it.  The remaining chunks are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_chunk && live_to_right > 0) {
    c = summary_data.chunk(i);
    HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_chunk = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    print_initial_summary_chunk(i, c, false);
    tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
                  reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining chunks are empty.  Print one more if there is one.
  if (i < end_chunk) {
    print_initial_summary_chunk(i, summary_data.chunk(i));
  }

  tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
                "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
                max_reclaimed_ratio_chunk, max_dead_to_right,
                max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  unsigned int id = PSParallelCompact::perm_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _chunk_vspace = 0;
  _chunk_data = 0;
  _chunk_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(chunk_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & ChunkSizeOffsetMask) == 0,
         "region size not a multiple of ChunkSize");

  bool result = initialize_chunk_data(region_size);

  // Initialize the block data if it will be used for updating pointers, or if
  // this is a debug build.
  if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
    result = result && initialize_block_data(region_size);
  }

  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
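  // 6642862 (this changeset): when a non-default page size was chosen above,
  // align the reservation to that page size (rs_align) and, when rs_align is
  // non-zero, request a large-page mapping; with the default page size,
  // rs_align is 0 and the default alignment suffices.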
  ReservedSpace rs(bytes, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());
  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(bytes)) {
      return vspace;
    }
    delete vspace;
  }

  return 0;
}

bool ParallelCompactData::initialize_chunk_data(size_t region_size)
{
  const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
  _chunk_vspace = create_vspace(count, sizeof(ChunkData));
  if (_chunk_vspace != 0) {
    _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
    _chunk_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data(size_t region_size)
{
  const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  if (_block_data) {
    memset(_block_data, 0, _block_vspace->committed_size());
  }
  memset(_chunk_data, 0, _chunk_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
  assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
  assert(end_chunk <= _chunk_count, "end_chunk out of range");
  assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");

  const size_t chunk_cnt = end_chunk - beg_chunk;

  if (_block_data) {
    const size_t blocks_per_chunk = ChunkSize / BlockSize;
    const size_t beg_block = beg_chunk * blocks_per_chunk;
    const size_t block_cnt = chunk_cnt * blocks_per_chunk;
    memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
  }
  memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
{
  const ChunkData* cur_cp = chunk(chunk_idx);
  const ChunkData* const end_cp = chunk(chunk_count() - 1);

  HeapWord* result = chunk_to_addr(chunk_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
  }
  return result;
}
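
// A note on the loop above: a partial_obj_size() equal to ChunkSize means the
// partial object extends through the entire chunk, so the walk continues into
// the next chunk; any smaller value means the object ends within the current
// chunk, at (chunk start + partial_obj_size).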

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
  const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_chunk == end_chunk) {
    // All in one chunk.
    _chunk_data[beg_chunk].add_live_obj(len);
    return;
  }

  // First chunk.
  const size_t beg_ofs = chunk_offset(addr);
  _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);

  klassOop klass = ((oop)addr)->klass();
  // Middle chunks--completely spanned by this object.
  for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
    _chunk_data[chunk].set_partial_obj_size(ChunkSize);
    _chunk_data[chunk].set_partial_obj_addr(addr);
  }

  // Last chunk.
  const size_t end_ofs = chunk_offset(addr + len - 1);
  _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
  _chunk_data[end_chunk].set_partial_obj_addr(addr);
}
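
// Hypothetical example of the bookkeeping above: an object of len == 700
// words starting at word offset 300 within chunk k spans chunks k and k + 1.
// Chunk k gets add_live_obj(512 - 300) == 212 words; there are no middle
// chunks; chunk k + 1 gets partial_obj_size == (300 + 700 - 1) % 512 + 1
// == 488, which accounts for the remaining 700 - 212 words.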

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
  assert(chunk_offset(end) == 0, "not ChunkSize aligned");

  size_t cur_chunk = addr_to_chunk_idx(beg);
  const size_t end_chunk = addr_to_chunk_idx(end);
  HeapWord* addr = beg;
  while (cur_chunk < end_chunk) {
    _chunk_data[cur_chunk].set_destination(addr);
    _chunk_data[cur_chunk].set_destination_count(0);
    _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
    _chunk_data[cur_chunk].set_data_location(addr);

    // Update live_obj_size so the chunk appears completely full.
    size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
    _chunk_data[cur_chunk].set_live_obj_size(live_size);

    ++cur_chunk;
    addr += ChunkSize;
  }
}

bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** target_next,
                                    HeapWord** source_next) {
  // This is too strict.
  // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
                  target_beg, target_end,
                  source_beg, source_end,
                  target_next != 0 ? *target_next : (HeapWord*) 0,
                  source_next != 0 ? *source_next : (HeapWord*) 0);
  }

  size_t cur_chunk = addr_to_chunk_idx(source_beg);
  const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  while (cur_chunk < end_chunk) {
    size_t words = _chunk_data[cur_chunk].data_size();

#if 1
    assert(pointer_delta(target_end, dest_addr) >= words,
           "source region does not fit into target region");
#else
    // XXX - need some work on the corner cases here.  If the chunk does not
    // fit, then must either make sure any partial_obj from the chunk fits, or
    // 'undo' the initial part of the partial_obj that is in the previous
    // chunk.
    if (dest_addr + words >= target_end) {
      // Let the caller know where to continue.
      *target_next = dest_addr;
      *source_next = chunk_to_addr(cur_chunk);
      return false;
    }
#endif  // #if 1

    _chunk_data[cur_chunk].set_destination(dest_addr);

    // Set the destination_count for cur_chunk, and if necessary, update
    // source_chunk for a destination chunk.  The source_chunk field is
    // updated if cur_chunk is the first (left-most) chunk to be copied to a
    // destination chunk.
    //
    // The destination_count calculation is a bit subtle.  A chunk that has
    // data that compacts into itself does not count itself as a destination.
    // This maintains the invariant that a zero count means the chunk is
    // available and can be claimed and then filled.
    if (words > 0) {
      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
      const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
#if 0
      // Initially assume that the destination chunks will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted
      // completely into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
      if (dest_chunk_1 != dest_chunk_2) {
        // Destination chunks differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else if (chunk_offset(dest_addr) == 0) {
        // Data from cur_chunk will be copied to the start of the destination
        // chunk.
        _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
      }
#else
      // Initially assume that the destination chunks will be different and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted partially
      // into dest_chunk_1 and partially into itself.
      uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
      if (dest_chunk_1 != dest_chunk_2) {
        // Data from cur_chunk will be copied to the start of dest_chunk_2.
        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
      } else {
        // Destination chunks are the same; adjust destination_count.
        destination_count -= 1;
        if (chunk_offset(dest_addr) == 0) {
          // Data from cur_chunk will be copied to the start of the
          // destination chunk.
          _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
        }
      }
#endif  // #if 0

      _chunk_data[cur_chunk].set_destination_count(destination_count);
      _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
      dest_addr += words;
    }

    ++cur_chunk;
  }

  *target_next = dest_addr;
  return true;
}
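
// Worked example of destination_count (hypothetical numbers): suppose chunk 7
// holds 512 live words that compact to a dest_addr in the middle of chunk 3,
// spilling into chunk 4.  Then dest_chunk_1 == 3 and dest_chunk_2 == 4, so
// destination_count == 2.  If the words instead landed entirely within chunk
// 3, destination_count would be 1; and a chunk whose data compacts entirely
// into itself gets destination_count == 0, preserving the invariant that zero
// means "available to claim".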

bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
  HeapWord* block_addr = block_to_addr(block_index);
  HeapWord* block_end_addr = block_addr + BlockSize;
  size_t chunk_index = addr_to_chunk_idx(block_addr);
  HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);

  // An object that ends at the end of the block ends in the block (the last
  // word of the object is to the left of the block's end).
  if ((block_addr < partial_obj_end_addr) &&
      (partial_obj_end_addr <= block_end_addr)) {
    return true;
  }

  return false;
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  HeapWord* result = NULL;
  if (UseParallelOldGCChunkPointerCalc) {
    result = chunk_calc_new_pointer(addr);
  } else {
    result = block_calc_new_pointer(addr);
  }
  return result;
}

// This method is overly complicated (expensive) to be called
// for every reference.
// Try to restructure this so that a NULL is returned if
// the object is dead.  But don't waste the cycles to explicitly check
// that it is dead since only live objects should be passed in.

HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    size of the partial object extending onto the chunk +
  //    sizes of the live objects in the Chunk that are to the left of addr
  const size_t partial_obj_size = chunk_ptr->partial_obj_size();
  HeapWord* const search_start = chunk_addr + partial_obj_size;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += partial_obj_size + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  return result;
}
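
// Hypothetical example of the calculation above: if addr is 37 words past
// chunk_addr, the chunk's destination is D, a partial object occupies the
// first 10 words of the chunk, and the bitmap finds 12 live words between
// chunk_addr + 10 and addr, then the object's new location is D + 10 + 12.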

HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Chunk covering the object.
  size_t chunk_index = addr_to_chunk_idx(addr);
  const ChunkData* const chunk_ptr = chunk(chunk_index);
  HeapWord* const chunk_addr = chunk_align_down(addr);

  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");

  HeapWord* result = chunk_ptr->destination();

  // If all the data in the chunk is live, then the new location of the object
  // can be calculated from the destination of the chunk plus the offset of
  // the object in the chunk.
  if (chunk_ptr->data_size() == ChunkSize) {
    result += pointer_delta(addr, chunk_addr);
    return result;
  }

  // The new location of the object is
  //    chunk destination +
  //    block offset +
  //    sizes of the live objects in the Block that are to the left of addr
  const size_t block_offset = addr_to_block_ptr(addr)->offset();
  HeapWord* const search_start = chunk_addr + block_offset;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += block_offset + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  assert(result == chunk_calc_new_pointer(addr), "Should match");
  return result;
}

klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
  klassOop updated_klass;
  if (PSParallelCompact::should_update_klass(old_klass)) {
    updated_klass = (klassOop) calc_new_pointer(old_klass);
  } else {
    updated_klass = old_klass;
  }

  return updated_klass;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_chunk_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

#ifdef NOT_PRODUCT
ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  return sd.chunk(chunk_index);
}
#endif

elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    if (!Universe::heap()->is_in_reserved(p)) {
      _root_refs_stack->push(p);
    } else {
      _other_refs_stack->push(p);
    }
  }
#endif
  mark_and_push(_compaction_manager, p);
}

void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
                                        oop* p) {
  assert(Universe::heap()->is_in_reserved(p),
         "we should only be traversing objects here");
  oop m = *p;
  if (m != NULL && mark_bitmap()->is_unmarked(m)) {
    if (mark_obj(m)) {
      m->follow_contents(cm);  // Follow contents of the marked object
    }
  }
}

// Anything associated with this variable is temporary.

void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
                                               oop* p) {
  // Push marked object, contents will be followed later
  oop m = *p;
  if (mark_obj(m)) {
    // This thread marked the object and
    // owns the subsequent processing of it.
    cm->save_for_scanning(m);
  }
}

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MemRegion mr = heap->reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                 // span
    true,               // atomic_discovery
    true,               // mt_discovery
    &_is_alive_closure,
    ParallelGCThreads,
    ParallelRefProcEnabled);
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate bit map for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate tables for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = gc_heap();
  PSYoungGen* young_gen = heap->young_gen();
  MutableSpace* perm_space = heap->perm_gen()->object_space();

  _space_info[perm_space_id].set_space(perm_space);
  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());

  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
                  _space_info[perm_space_id].min_dense_prefix());
  }
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}
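
// Sketch of the math set up above (inferred from the initialization; the
// definition of normal_distribution() is outside this excerpt): the limiter
// evaluates a Gaussian in the live-object density d,
//   f(d) = _dwl_first_term * exp(-0.5 * ((d - _dwl_mean) / _dwl_std_dev)^2)
// where _dwl_first_term == 1 / (sqrt(2 * pi) * _dwl_std_dev) is the usual
// normalizing constant, and _dwl_adjustment caches f(1.0) so the result can
// later be shifted to equal the configured minimum at d == 1 (see the
// dead-wood table at the end of this section).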

// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
public:
  PreGCValues() { }
  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }

  void fill(ParallelScavengeHeap* heap) {
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
  };

  size_t heap_used() const      { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const   { return _old_gen_used; }
  size_t perm_gen_used() const  { return _perm_gen_used; }

private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _perm_gen_used;
};

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that
  // will be set at the end of GC.  The marking bitmap is cleared to top;
  // nothing should be marked above top.  The summary data is cleared to the
  // larger of top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
  const size_t end_chunk =
    _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
  _summary_data.clear_range(beg_chunk, end_chunk);
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of
  // minor collections will have swapped the spaces an unknown number of
  // times.
  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
  ParallelScavengeHeap* heap = gc_heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  ParCompactionManager::reset();
  NOT_PRODUCT(_mark_bitmap.reset_counters());
  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
    heap->perm_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

  // Clear the marking bitmap and summary data and update top() in each space.
  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
    clear_data_covering_space(SpaceId(id));
    _space_info[id].space()->set_top(_space_info[id].new_top());
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = gc_heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(),
                                            heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();
    MemRegion perm_mr = heap->perm_gen()->reserved();
    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

    if (young_gen_empty) {
      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
    }
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  ref_processor()->enqueue_discovered_references(NULL);

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t chunk_size = ParallelCompactData::ChunkSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
  const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);

  // Skip full chunks at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const ChunkData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.chunk_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
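
  // Note: d^2 * (1 - d)^2 peaks at d == 0.5 with value 1/16, so the deadwood
  // goal is at most 6.25% of the space's capacity and falls toward zero as
  // the space becomes either nearly empty or nearly full.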

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.chunk_to_addr(cp);
  const ChunkData* full_cp = cp;
  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* chunk_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
                    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
                    sd.chunk(cp), chunk_destination,
                    dense_prefix, cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the chunk that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of chunks,
      // so iterate backwards over those sparse chunks, looking for the chunk
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.chunk(cp) * chunk_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
        const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
        double prev_chunk_density_to_right =
          double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
        if (density_to_right <= prev_chunk_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
                        prev_chunk_density_to_right);
        }
        dense_prefix -= chunk_size;
        live_to_right = prev_chunk_live_to_right;
        space_to_right = prev_chunk_space_to_right;
        density_to_right = prev_chunk_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += chunk_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
  ChunkData* const cp = summary_data().chunk(chunk_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, addr, chunk_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a
// limit, which is then adjusted so the return value is min_percent when the
// density is 1.
//
duke@435 1222 // The following table shows some return values for different values of the
duke@435 1223 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
duke@435 1224 // min_percent is 1.
duke@435 1225 //
duke@435 1226 // fraction allowed as dead wood
duke@435 1227 // -----------------------------------------------------------------
duke@435 1228 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
duke@435 1229 // ------- ---------- ---------- ---------- ---------- ---------- ----------
duke@435 1230 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1231 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1232 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1233 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1234 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1235 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1236 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1237 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1238 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1239 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1240 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
duke@435 1241 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1242 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1243 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1244 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1245 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1246 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1247 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1248 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1249 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1250 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1251
duke@435 1252 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
duke@435 1253 {
duke@435 1254 assert(_dwl_initialized, "uninitialized");
duke@435 1255
duke@435 1256 // The raw limit is the value of the normal distribution at x = density.
duke@435 1257 const double raw_limit = normal_distribution(density);
duke@435 1258
duke@435 1259 // Adjust the raw limit so it becomes the minimum when the density is 1.
duke@435 1260 //
duke@435 1261 // First subtract the adjustment value (which is simply the precomputed value
duke@435 1262 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
duke@435 1263 // Then add the minimum value, so the minimum is returned when the density is
duke@435 1264 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
duke@435 1265 const double min = double(min_percent) / 100.0;
duke@435 1266 const double limit = raw_limit - _dwl_adjustment + min;
duke@435 1267 return MAX2(limit, 0.0);
duke@435 1268 }
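// Worked example (a sketch; assumes normal_distribution() is the Gaussian
// density used to generate the table above, with mean 0.5). For
// dead_wood_limiter(0.5, 1):
//   raw_limit       = normal_distribution(0.5)   // the peak of the curve
//   _dwl_adjustment = normal_distribution(1.0)   // precomputed
//   limit           = raw_limit - _dwl_adjustment + 0.01
// which reproduces the density=0.50 row of the table and degenerates to
// exactly min_percent (0.01) when the density is 1.0.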
duke@435 1269
duke@435 1270 ParallelCompactData::ChunkData*
duke@435 1271 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
duke@435 1272 const ChunkData* end)
duke@435 1273 {
duke@435 1274 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1275 ParallelCompactData& sd = summary_data();
duke@435 1276 size_t left = sd.chunk(beg);
duke@435 1277 size_t right = end > beg ? sd.chunk(end) - 1 : left;
duke@435 1278
duke@435 1279 // Binary search.
duke@435 1280 while (left < right) {
duke@435 1281 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1282 const size_t middle = left + (right - left) / 2;
duke@435 1283 ChunkData* const middle_ptr = sd.chunk(middle);
duke@435 1284 HeapWord* const dest = middle_ptr->destination();
duke@435 1285 HeapWord* const addr = sd.chunk_to_addr(middle);
duke@435 1286 assert(dest != NULL, "sanity");
duke@435 1287 assert(dest <= addr, "must move left");
duke@435 1288
duke@435 1289 if (middle > left && dest < addr) {
duke@435 1290 right = middle - 1;
duke@435 1291 } else if (middle < right && middle_ptr->data_size() == chunk_size) {
duke@435 1292 left = middle + 1;
duke@435 1293 } else {
duke@435 1294 return middle_ptr;
duke@435 1295 }
duke@435 1296 }
duke@435 1297 return sd.chunk(left);
duke@435 1298 }
duke@435 1299
duke@435 1300 ParallelCompactData::ChunkData*
duke@435 1301 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
duke@435 1302 const ChunkData* end,
duke@435 1303 size_t dead_words)
duke@435 1304 {
duke@435 1305 ParallelCompactData& sd = summary_data();
duke@435 1306 size_t left = sd.chunk(beg);
duke@435 1307 size_t right = end > beg ? sd.chunk(end) - 1 : left;
duke@435 1308
duke@435 1309 // Binary search.
duke@435 1310 while (left < right) {
duke@435 1311 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1312 const size_t middle = left + (right - left) / 2;
duke@435 1313 ChunkData* const middle_ptr = sd.chunk(middle);
duke@435 1314 HeapWord* const dest = middle_ptr->destination();
duke@435 1315 HeapWord* const addr = sd.chunk_to_addr(middle);
duke@435 1316 assert(dest != NULL, "sanity");
duke@435 1317 assert(dest <= addr, "must move left");
duke@435 1318
duke@435 1319 const size_t dead_to_left = pointer_delta(addr, dest);
duke@435 1320 if (middle > left && dead_to_left > dead_words) {
duke@435 1321 right = middle - 1;
duke@435 1322 } else if (middle < right && dead_to_left < dead_words) {
duke@435 1323 left = middle + 1;
duke@435 1324 } else {
duke@435 1325 return middle_ptr;
duke@435 1326 }
duke@435 1327 }
duke@435 1328 return sd.chunk(left);
duke@435 1329 }
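// Both binary searches above depend on the same monotonicity property:
// destinations never move to the right, so the dead space to the left,
// pointer_delta(addr, dest), is non-decreasing from chunk to chunk. A
// sketch of the search with hypothetical values:
//   chunk:        0    1    2    3    4
//   dead_to_left: 0    0   64  512  900
// dead_wood_limit_chunk(beg, end, 500) converges on chunk 3, the first
// chunk whose dead_to_left reaches the requested number of dead words.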
duke@435 1330
duke@435 1331 // The result is valid during the summary phase, after the initial summarization
duke@435 1332 // of each space into itself, and before final summarization.
duke@435 1333 inline double
duke@435 1334 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
duke@435 1335 HeapWord* const bottom,
duke@435 1336 HeapWord* const top,
duke@435 1337 HeapWord* const new_top)
duke@435 1338 {
duke@435 1339 ParallelCompactData& sd = summary_data();
duke@435 1340
duke@435 1341 assert(cp != NULL, "sanity");
duke@435 1342 assert(bottom != NULL, "sanity");
duke@435 1343 assert(top != NULL, "sanity");
duke@435 1344 assert(new_top != NULL, "sanity");
duke@435 1345 assert(top >= new_top, "summary data problem?");
duke@435 1346 assert(new_top > bottom, "space is empty; should not be here");
duke@435 1347 assert(new_top >= cp->destination(), "sanity");
duke@435 1348 assert(top >= sd.chunk_to_addr(cp), "sanity");
duke@435 1349
duke@435 1350 HeapWord* const destination = cp->destination();
duke@435 1351 const size_t dense_prefix_live = pointer_delta(destination, bottom);
duke@435 1352 const size_t compacted_region_live = pointer_delta(new_top, destination);
duke@435 1353 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
duke@435 1354 const size_t reclaimable = compacted_region_used - compacted_region_live;
duke@435 1355
duke@435 1356 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
duke@435 1357 return double(reclaimable) / divisor;
duke@435 1358 }
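// Numeric sketch (hypothetical values, in words): if the dense prefix will
// hold 1000 live words and the compacted region holds 400 live words out
// of 600 used, then
//   reclaimable = 600 - 400         = 200
//   divisor     = 1000 + 1.25 * 400 = 1500
//   ratio       = 200 / 1500        ~= 0.13
// The 1.25 multiplier biases the choice away from candidates that would
// copy a large amount of live data for the space reclaimed.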
duke@435 1359
duke@435 1360 // Return the address of the end of the dense prefix, a.k.a. the start of the
duke@435 1361 // compacted region. The address is always on a chunk boundary.
duke@435 1362 //
duke@435 1363 // Completely full chunks at the left are skipped, since no compaction can occur
duke@435 1364 // in those chunks. Then the maximum amount of dead wood to allow is computed,
duke@435 1365 // based on the density (amount live / capacity) of the generation; the chunk
duke@435 1366 // with approximately that amount of dead space to the left is identified as the
duke@435 1367 // limit chunk. Chunks between the last completely full chunk and the limit
duke@435 1368 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio()
duke@435 1369 // is selected.
duke@435 1370 HeapWord*
duke@435 1371 PSParallelCompact::compute_dense_prefix(const SpaceId id,
duke@435 1372 bool maximum_compaction)
duke@435 1373 {
duke@435 1374 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1375 const ParallelCompactData& sd = summary_data();
duke@435 1376
duke@435 1377 const MutableSpace* const space = _space_info[id].space();
duke@435 1378 HeapWord* const top = space->top();
duke@435 1379 HeapWord* const top_aligned_up = sd.chunk_align_up(top);
duke@435 1380 HeapWord* const new_top = _space_info[id].new_top();
duke@435 1381 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
duke@435 1382 HeapWord* const bottom = space->bottom();
duke@435 1383 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
duke@435 1384 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 1385 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);
duke@435 1386
duke@435 1387 // Skip full chunks at the beginning of the space--they are necessarily part
duke@435 1388 // of the dense prefix.
duke@435 1389 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
duke@435 1390 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
duke@435 1391 space->is_empty(), "no dead space allowed to the left");
duke@435 1392 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
duke@435 1393 "chunk must have dead space");
duke@435 1394
duke@435 1395 // The gc number is saved whenever a maximum compaction is done, and used to
duke@435 1396 // determine when the maximum compaction interval has expired. This avoids
duke@435 1397 // successive max compactions for different reasons.
duke@435 1398 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 1399 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 1400 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
duke@435 1401 total_invocations() == HeapFirstMaximumCompactionCount;
duke@435 1402 if (maximum_compaction || full_cp == top_cp || interval_ended) {
duke@435 1403 _maximum_compaction_gc_num = total_invocations();
duke@435 1404 return sd.chunk_to_addr(full_cp);
duke@435 1405 }
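// Example (hypothetical flag value): with HeapMaximumCompactionInterval = 20,
// a maximum compaction at invocation 100 sets _maximum_compaction_gc_num to
// 100; invocations 101-120 may select a dense prefix, and invocation 121
// (gcs_since_max == 21) forces the next maximum compaction.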
duke@435 1406
duke@435 1407 const size_t space_live = pointer_delta(new_top, bottom);
duke@435 1408 const size_t space_used = space->used_in_words();
duke@435 1409 const size_t space_capacity = space->capacity_in_words();
duke@435 1410
duke@435 1411 const double density = double(space_live) / double(space_capacity);
duke@435 1412 const size_t min_percent_free =
duke@435 1413 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
duke@435 1414 const double limiter = dead_wood_limiter(density, min_percent_free);
duke@435 1415 const size_t dead_wood_max = space_used - space_live;
duke@435 1416 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
duke@435 1417 dead_wood_max);
duke@435 1418
duke@435 1419 if (TraceParallelOldGCDensePrefix) {
duke@435 1420 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 1421 "space_cap=" SIZE_FORMAT,
duke@435 1422 space_live, space_used,
duke@435 1423 space_capacity);
duke@435 1424 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
duke@435 1425 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
duke@435 1426 density, min_percent_free, limiter,
duke@435 1427 dead_wood_max, dead_wood_limit);
duke@435 1428 }
duke@435 1429
duke@435 1430 // Locate the chunk with the desired amount of dead space to the left.
duke@435 1431 const ChunkData* const limit_cp =
duke@435 1432 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);
duke@435 1433
duke@435 1434 // Scan from the first chunk with dead space to the limit chunk and find the
duke@435 1435 // one with the best (largest) reclaimed ratio.
duke@435 1436 double best_ratio = 0.0;
duke@435 1437 const ChunkData* best_cp = full_cp;
duke@435 1438 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
duke@435 1439 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
duke@435 1440 if (tmp_ratio > best_ratio) {
duke@435 1441 best_cp = cp;
duke@435 1442 best_ratio = tmp_ratio;
duke@435 1443 }
duke@435 1444 }
duke@435 1445
duke@435 1446 #if 0
duke@435 1447 // Something to consider: if the chunk with the best ratio is 'close to' the
duke@435 1448 // first chunk w/free space, choose the first chunk with free space
duke@435 1449 // ("first-free"). The first-free chunk is usually near the start of the
duke@435 1450 // heap, which means we are copying most of the heap already, so copy a bit
duke@435 1451 // more to get complete compaction.
duke@435 1452 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
duke@435 1453 _maximum_compaction_gc_num = total_invocations();
duke@435 1454 best_cp = full_cp;
duke@435 1455 }
duke@435 1456 #endif // #if 0
duke@435 1457
duke@435 1458 return sd.chunk_to_addr(best_cp);
duke@435 1459 }
duke@435 1460
duke@435 1461 void PSParallelCompact::summarize_spaces_quick()
duke@435 1462 {
duke@435 1463 for (unsigned int i = 0; i < last_space_id; ++i) {
duke@435 1464 const MutableSpace* space = _space_info[i].space();
duke@435 1465 bool result = _summary_data.summarize(space->bottom(), space->end(),
duke@435 1466 space->bottom(), space->top(),
duke@435 1467 _space_info[i].new_top_addr());
duke@435 1468 assert(result, "should never fail");
duke@435 1469 _space_info[i].set_dense_prefix(space->bottom());
duke@435 1470 }
duke@435 1471 }
duke@435 1472
duke@435 1473 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
duke@435 1474 {
duke@435 1475 HeapWord* const dense_prefix_end = dense_prefix(id);
duke@435 1476 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
duke@435 1477 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
duke@435 1478 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
duke@435 1479 // Only enough dead space is filled so that any remaining dead space to the
duke@435 1480 // left is larger than the minimum filler object. (The remainder is filled
duke@435 1481 // during the copy/update phase.)
duke@435 1482 //
duke@435 1483 // The size of the dead space to the right of the boundary is not a
duke@435 1484 // concern, since compaction will be able to use whatever space is
duke@435 1485 // available.
duke@435 1486 //
duke@435 1487 // Here '||' is the boundary, 'x' represents a don't care bit and a box
duke@435 1488 // surrounds the space to be filled with an object.
duke@435 1489 //
duke@435 1490 // In the 32-bit VM, each bit represents two 32-bit words:
duke@435 1491 // +---+
duke@435 1492 // a) beg_bits: ... x x x | 0 | || 0 x x ...
duke@435 1493 // end_bits: ... x x x | 0 | || 0 x x ...
duke@435 1494 // +---+
duke@435 1495 //
duke@435 1496 // In the 64-bit VM, each bit represents one 64-bit word:
duke@435 1497 // +------------+
duke@435 1498 // b) beg_bits: ... x x x | 0 || 0 | x x ...
duke@435 1499 // end_bits: ... x x 1 | 0 || 0 | x x ...
duke@435 1500 // +------------+
duke@435 1501 // +-------+
duke@435 1502 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
duke@435 1503 // end_bits: ... x 1 | 0 0 | || 0 x x ...
duke@435 1504 // +-------+
duke@435 1505 // +-----------+
duke@435 1506 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
duke@435 1507 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
duke@435 1508 // +-----------+
duke@435 1509 // +-------+
duke@435 1510 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1511 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1512 // +-------+
duke@435 1513
duke@435 1514 // Initially assume case a, c or e will apply.
duke@435 1515 size_t obj_len = (size_t)oopDesc::header_size();
duke@435 1516 HeapWord* obj_beg = dense_prefix_end - obj_len;
duke@435 1517
duke@435 1518 #ifdef _LP64
duke@435 1519 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
duke@435 1520 // Case b above.
duke@435 1521 obj_beg = dense_prefix_end - 1;
duke@435 1522 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
duke@435 1523 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
duke@435 1524 // Case d above.
duke@435 1525 obj_beg = dense_prefix_end - 3;
duke@435 1526 obj_len = 3;
duke@435 1527 }
duke@435 1528 #endif // #ifdef _LP64
duke@435 1529
duke@435 1530 MemRegion region(obj_beg, obj_len);
duke@435 1531 SharedHeap::fill_region_with_object(region);
duke@435 1532 _mark_bitmap.mark_obj(obj_beg, obj_len);
duke@435 1533 _summary_data.add_obj(obj_beg, obj_len);
duke@435 1534 assert(start_array(id) != NULL, "sanity");
duke@435 1535 start_array(id)->allocate_block(obj_beg);
duke@435 1536 }
duke@435 1537 }
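// Example of case d above (64-bit VM, so one bit per word): if the three
// words immediately left of the boundary are dead and the word before them
// ends an object, the code above fills [dense_prefix_end - 3,
// dense_prefix_end) with a single 3-word object, marks it live, and records
// it in the summary data and the start array, so the copy/update phase
// never encounters a fragment too small to fill.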
duke@435 1538
duke@435 1539 void
duke@435 1540 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
duke@435 1541 {
duke@435 1542 assert(id < last_space_id, "id out of range");
duke@435 1543
duke@435 1544 const MutableSpace* space = _space_info[id].space();
duke@435 1545 HeapWord** new_top_addr = _space_info[id].new_top_addr();
duke@435 1546
duke@435 1547 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
duke@435 1548 _space_info[id].set_dense_prefix(dense_prefix_end);
duke@435 1549
duke@435 1550 #ifndef PRODUCT
duke@435 1551 if (TraceParallelOldGCDensePrefix) {
duke@435 1552 print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
duke@435 1553 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
duke@435 1554 print_dense_prefix_stats("density", id, maximum_compaction, addr);
duke@435 1555 }
duke@435 1556 #endif // #ifndef PRODUCT
duke@435 1557
duke@435 1558 // If dead space crosses the dense prefix boundary, it is (at least partially)
duke@435 1559 // filled with a dummy object, marked live and added to the summary data.
duke@435 1560 // This simplifies the copy/update phase and must be done before the final
duke@435 1561 // locations of objects are determined, to prevent leaving a fragment of dead
duke@435 1562 // space that is too small to fill with an object.
duke@435 1563 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
duke@435 1564 fill_dense_prefix_end(id);
duke@435 1565 }
duke@435 1566
duke@435 1567 // Compute the destination of each Chunk, and thus each object.
duke@435 1568 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
duke@435 1569 _summary_data.summarize(dense_prefix_end, space->end(),
duke@435 1570 dense_prefix_end, space->top(),
duke@435 1571 new_top_addr);
duke@435 1572
duke@435 1573 if (TraceParallelOldGCSummaryPhase) {
duke@435 1574 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 1575 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
duke@435 1576 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
duke@435 1577 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
duke@435 1578 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
duke@435 1579 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
duke@435 1580 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
duke@435 1581 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
duke@435 1582 id, space->capacity_in_words(), dense_prefix_end,
duke@435 1583 dp_chunk, dp_words / chunk_size,
duke@435 1584 cr_words / chunk_size, *new_top_addr);
duke@435 1585 }
duke@435 1586 }
duke@435 1587
duke@435 1588 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
duke@435 1589 bool maximum_compaction)
duke@435 1590 {
duke@435 1591 EventMark m("2 summarize");
duke@435 1592 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
duke@435 1593 // trace("2");
duke@435 1594
duke@435 1595 #ifdef ASSERT
duke@435 1596 if (VerifyParallelOldWithMarkSweep &&
duke@435 1597 (PSParallelCompact::total_invocations() %
duke@435 1598 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 1599 verify_mark_bitmap(_mark_bitmap);
duke@435 1600 }
duke@435 1601 if (TraceParallelOldGCMarkingPhase) {
duke@435 1602 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
duke@435 1603 "add_obj_bytes=" SIZE_FORMAT,
duke@435 1604 add_obj_count, add_obj_size * HeapWordSize);
duke@435 1605 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
duke@435 1606 "mark_bitmap_bytes=" SIZE_FORMAT,
duke@435 1607 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
duke@435 1608 }
duke@435 1609 #endif // #ifdef ASSERT
duke@435 1610
duke@435 1611 // Quick summarization of each space into itself, to see how much is live.
duke@435 1612 summarize_spaces_quick();
duke@435 1613
duke@435 1614 if (TraceParallelOldGCSummaryPhase) {
duke@435 1615 tty->print_cr("summary_phase: after summarizing each space to self");
duke@435 1616 Universe::print();
duke@435 1617 NOT_PRODUCT(print_chunk_ranges());
duke@435 1618 if (Verbose) {
duke@435 1619 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
duke@435 1620 }
duke@435 1621 }
duke@435 1622
duke@435 1623 // The amount of live data that will end up in old space (assuming it fits).
duke@435 1624 size_t old_space_total_live = 0;
duke@435 1625 unsigned int id;
duke@435 1626 for (id = old_space_id; id < last_space_id; ++id) {
duke@435 1627 old_space_total_live += pointer_delta(_space_info[id].new_top(),
duke@435 1628 _space_info[id].space()->bottom());
duke@435 1629 }
duke@435 1630
duke@435 1631 const MutableSpace* old_space = _space_info[old_space_id].space();
duke@435 1632 if (old_space_total_live > old_space->capacity_in_words()) {
duke@435 1633 // XXX - should also try to expand
duke@435 1634 maximum_compaction = true;
duke@435 1635 } else if (!UseParallelOldGCDensePrefix) {
duke@435 1636 maximum_compaction = true;
duke@435 1637 }
duke@435 1638
duke@435 1639 // Permanent and Old generations.
duke@435 1640 summarize_space(perm_space_id, maximum_compaction);
duke@435 1641 summarize_space(old_space_id, maximum_compaction);
duke@435 1642
duke@435 1643 // Summarize the remaining spaces (those in the young gen) into old space. If
duke@435 1644 // the live data from a space doesn't fit, the existing summarization is left
duke@435 1645 // intact, so the data is compacted down within the space itself.
duke@435 1646 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
duke@435 1647 HeapWord* const target_space_end = old_space->end();
duke@435 1648 for (id = eden_space_id; id < last_space_id; ++id) {
duke@435 1649 const MutableSpace* space = _space_info[id].space();
duke@435 1650 const size_t live = pointer_delta(_space_info[id].new_top(),
duke@435 1651 space->bottom());
duke@435 1652 const size_t available = pointer_delta(target_space_end, *new_top_addr);
duke@435 1653 if (live <= available) {
duke@435 1654 // All the live data will fit.
duke@435 1655 if (TraceParallelOldGCSummaryPhase) {
duke@435 1656 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
duke@435 1657 id, *new_top_addr);
duke@435 1658 }
duke@435 1659 _summary_data.summarize(*new_top_addr, target_space_end,
duke@435 1660 space->bottom(), space->top(),
duke@435 1661 new_top_addr);
duke@435 1662
duke@435 1663 // Reset the new_top value for the space.
duke@435 1664 _space_info[id].set_new_top(space->bottom());
duke@435 1665
duke@435 1666 // Clear the source_chunk field for each chunk in the space.
duke@435 1667 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
duke@435 1668 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
duke@435 1669 while (beg_chunk <= end_chunk) {
duke@435 1670 beg_chunk->set_source_chunk(0);
duke@435 1671 ++beg_chunk;
duke@435 1672 }
duke@435 1673 }
duke@435 1674 }
duke@435 1675
duke@435 1676 // Fill in the block data after any changes to the chunks have
duke@435 1677 // been made.
duke@435 1678 #ifdef ASSERT
duke@435 1679 summarize_blocks(cm, perm_space_id);
duke@435 1680 summarize_blocks(cm, old_space_id);
duke@435 1681 #else
duke@435 1682 if (!UseParallelOldGCChunkPointerCalc) {
duke@435 1683 summarize_blocks(cm, perm_space_id);
duke@435 1684 summarize_blocks(cm, old_space_id);
duke@435 1685 }
duke@435 1686 #endif
duke@435 1687
duke@435 1688 if (TraceParallelOldGCSummaryPhase) {
duke@435 1689 tty->print_cr("summary_phase: after final summarization");
duke@435 1690 Universe::print();
duke@435 1691 NOT_PRODUCT(print_chunk_ranges());
duke@435 1692 if (Verbose) {
duke@435 1693 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
duke@435 1694 }
duke@435 1695 }
duke@435 1696 }
duke@435 1697
duke@435 1698 // Fill in the BlockData.
duke@435 1699 // Iterate over the spaces and within each space iterate over
duke@435 1700 // the chunks and fill in the BlockData for each chunk.
duke@435 1701
duke@435 1702 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
duke@435 1703 SpaceId first_compaction_space_id) {
duke@435 1704 #if 0
duke@435 1705 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
duke@435 1706 for (SpaceId cur_space_id = first_compaction_space_id;
duke@435 1707 cur_space_id != last_space_id;
duke@435 1708 cur_space_id = next_compaction_space_id(cur_space_id)) {
duke@435 1709 // Iterate over the chunks in the space
duke@435 1710 size_t start_chunk_index =
duke@435 1711 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
duke@435 1712 BitBlockUpdateClosure bbu(mark_bitmap(),
duke@435 1713 cm,
duke@435 1714 start_chunk_index);
duke@435 1715 // Iterate over blocks.
duke@435 1716 for (size_t chunk_index = start_chunk_index;
duke@435 1717 chunk_index < _summary_data.chunk_count() &&
duke@435 1718 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
duke@435 1719 chunk_index++) {
duke@435 1720
duke@435 1721 // Reset the closure for the new chunk. Note that the closure
duke@435 1722 // maintains some data that does not get reset for each chunk
duke@435 1723 // so a new instance of the closure is not appropriate.
duke@435 1724 bbu.reset_chunk(chunk_index);
duke@435 1725
duke@435 1726 // Start the iteration with the first live object. This
duke@435 1727 // may return the end of the chunk. That is acceptable since
duke@435 1728 // it will properly limit the iterations.
duke@435 1729 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
duke@435 1730 _summary_data.first_live_or_end_in_chunk(chunk_index));
duke@435 1731
duke@435 1732 // End the iteration at the end of the chunk.
duke@435 1733 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
duke@435 1734 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
duke@435 1735 ParMarkBitMap::idx_t right_offset =
duke@435 1736 mark_bitmap()->addr_to_bit(chunk_end);
duke@435 1737
duke@435 1738 // Blocks that have no objects starting in them can be
duke@435 1739 // skipped because their data will never be used.
duke@435 1740 if (left_offset < right_offset) {
duke@435 1741
duke@435 1742 // Iterate through the objects in the chunk.
duke@435 1743 ParMarkBitMap::idx_t last_offset =
duke@435 1744 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
duke@435 1745
duke@435 1746 // If last_offset is less than right_offset, then the iterations
duke@435 1747 // terminated while it was looking for an end bit. "last_offset"
duke@435 1748 // is then the offset for the last start bit. In this situation
duke@435 1749 // the "offset" field for the next block to the right (_cur_block + 1)
duke@435 1750 // will not have been updated although there may be live data
duke@435 1751 // to the left of the chunk.
duke@435 1752
duke@435 1753 size_t cur_block_plus_1 = bbu.cur_block() + 1;
duke@435 1754 HeapWord* cur_block_plus_1_addr =
duke@435 1755 _summary_data.block_to_addr(bbu.cur_block()) +
duke@435 1756 ParallelCompactData::BlockSize;
duke@435 1757 HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
duke@435 1758 #if 1 // This code works. The #else branch doesn't, but should. Why doesn't it?
duke@435 1759 // The current block (cur_block()) has already been updated.
duke@435 1760 // The last block that may need to be updated is either the
duke@435 1761 // next block (current block + 1) or the block where the
duke@435 1762 // last object starts (which can be greater than the
duke@435 1763 // next block if there were no objects found in intervening
duke@435 1764 // blocks).
duke@435 1765 size_t last_block =
duke@435 1766 MAX2(bbu.cur_block() + 1,
duke@435 1767 _summary_data.addr_to_block_idx(last_offset_addr));
duke@435 1768 #else
duke@435 1769 // The current block has already been updated. The only block
duke@435 1770 // that remains to be updated is the block where the last
duke@435 1771 // object in the chunk starts.
duke@435 1772 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
duke@435 1773 #endif
duke@435 1774 assert_bit_is_start(last_offset);
duke@435 1775 assert((last_block == _summary_data.block_count()) ||
duke@435 1776 (_summary_data.block(last_block)->raw_offset() == 0),
duke@435 1777 "Should not have been set");
duke@435 1778 // Is the last block still in the current chunk? If still
duke@435 1779 // in this chunk, update the last block (the counting that
duke@435 1780 // included the current block is meant for the offset of the last
duke@435 1781 // block). If not in this chunk, do nothing. Should not
duke@435 1782 // update a block in the next chunk.
duke@435 1783 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
duke@435 1784 last_block)) {
duke@435 1785 if (last_offset < right_offset) {
duke@435 1786 // The last object started in this chunk but ends beyond
duke@435 1787 // this chunk. Update the block for this last object.
duke@435 1788 assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
duke@435 1789 // No end bit was found. The closure takes care of
duke@435 1790 // the cases where
duke@435 1791 // an object crosses over into the next block
duke@435 1792 // an object starts and ends in the next block
duke@435 1793 // It does not handle the case where an object is
duke@435 1794 // the first object in a later block and extends
duke@435 1795 // past the end of the chunk (i.e., the closure
duke@435 1796 // only handles complete objects that are in the range
duke@435 1797 // it is given). That object is handed back here
duke@435 1798 // for any special consideration necessary.
duke@435 1799 //
duke@435 1800 // Is the first bit in the last block a start or end bit?
duke@435 1801 //
duke@435 1802 // If the partial object ends in the last block L,
duke@435 1803 // then the 1st bit in L may be an end bit.
duke@435 1804 //
duke@435 1805 // Else does the last object start in a block after the current
duke@435 1806 // block? A block AA will already have been updated if an
duke@435 1807 // object ends in the next block AA+1. An object found to end in
duke@435 1808 // AA+1 is the trigger that updates AA. Objects are being
duke@435 1809 // counted in the current block for updating a following
duke@435 1810 // block. An object may start in a later block
duke@435 1811 // but may extend beyond the last block in the chunk.
duke@435 1812 // Updates are only done when the end of an object has been
duke@435 1813 // found. If the last object (covered by block L) starts
duke@435 1814 // beyond the current block, then no object ends in L (otherwise
duke@435 1815 // L would be the current block). So the first bit in L is
duke@435 1816 // a start bit.
duke@435 1817 //
duke@435 1818 // Else the last object starts in the current block and ends
duke@435 1819 // beyond the chunk. The current block has already been
duke@435 1820 // updated and there is no later block (with an object
duke@435 1821 // starting in it) that needs to be updated.
duke@435 1822 //
duke@435 1823 if (_summary_data.partial_obj_ends_in_block(last_block)) {
duke@435 1824 _summary_data.block(last_block)->set_end_bit_offset(
duke@435 1825 bbu.live_data_left());
duke@435 1826 } else if (last_offset_addr >= cur_block_plus_1_addr) {
duke@435 1827 // The start of the object is in a later block
duke@435 1828 // (to the right of the current block) and there are no
duke@435 1829 // complete live objects to the left of this last object
duke@435 1830 // within the chunk.
duke@435 1831 // The first bit in the block is for the start of the
duke@435 1832 // last object.
duke@435 1833 _summary_data.block(last_block)->set_start_bit_offset(
duke@435 1834 bbu.live_data_left());
duke@435 1835 } else {
duke@435 1836 // The start of the last object was found in
duke@435 1837 // the current chunk (which has already
duke@435 1838 // been updated).
duke@435 1839 assert(bbu.cur_block() ==
duke@435 1840 _summary_data.addr_to_block_idx(last_offset_addr),
duke@435 1841 "Should be a block already processed");
duke@435 1842 }
duke@435 1843 #ifdef ASSERT
duke@435 1844 // Is there enough block information to find this object?
duke@435 1845 // The destination of the chunk has not been set so the
duke@435 1846 // values returned by chunk_calc_new_pointer() and
duke@435 1847 // calc_new_pointer() will only be
duke@435 1848 // offsets. But they should agree.
duke@435 1849 HeapWord* moved_obj_with_chunks =
duke@435 1850 _summary_data.chunk_calc_new_pointer(last_offset_addr);
duke@435 1851 HeapWord* moved_obj_with_blocks =
duke@435 1852 _summary_data.calc_new_pointer(last_offset_addr);
duke@435 1853 assert(moved_obj_with_chunks == moved_obj_with_blocks,
duke@435 1854 "Block calculation is wrong");
duke@435 1855 #endif
duke@435 1856 } else if (last_block < _summary_data.block_count()) {
duke@435 1857 // Iterations ended looking for a start bit (but
duke@435 1858 // did not run off the end of the block table).
duke@435 1859 _summary_data.block(last_block)->set_start_bit_offset(
duke@435 1860 bbu.live_data_left());
duke@435 1861 }
duke@435 1862 }
duke@435 1863 #ifdef ASSERT
duke@435 1864 // Is there enough block information to find this object?
duke@435 1865 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
duke@435 1866 HeapWord* moved_obj_with_chunks =
duke@435 1867 _summary_data.chunk_calc_new_pointer(left_offset_addr);
duke@435 1868 HeapWord* moved_obj_with_blocks =
duke@435 1869 _summary_data.calc_new_pointer(left_offset_addr);
duke@435 1870 assert(moved_obj_with_chunks == moved_obj_with_blocks,
duke@435 1871 "Block calculation is wrong");
duke@435 1872 #endif
duke@435 1873
duke@435 1874 // Is there another block after the end of this chunk?
duke@435 1875 #ifdef ASSERT
duke@435 1876 if (last_block < _summary_data.block_count()) {
duke@435 1877 // No object may have been found in a block. If that
duke@435 1878 // block is at the end of the chunk, the iteration will
duke@435 1879 // terminate without incrementing the current block so
duke@435 1880 // that the current block is not the last block in the
duke@435 1881 // chunk. That situation precludes asserting that the
duke@435 1882 // current block is the last block in the chunk. Assert
duke@435 1883 // the lesser condition that the current block does not
duke@435 1884 // exceed the chunk.
duke@435 1885 assert(_summary_data.block_to_addr(last_block) <=
duke@435 1886 (_summary_data.chunk_to_addr(chunk_index) +
duke@435 1887 ParallelCompactData::ChunkSize),
duke@435 1888 "Chunk and block inconsistency");
duke@435 1889 assert(last_offset <= right_offset, "Iteration over ran end");
duke@435 1890 }
duke@435 1891 #endif
duke@435 1892 }
duke@435 1893 #ifdef ASSERT
duke@435 1894 if (PrintGCDetails && Verbose) {
duke@435 1895 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
duke@435 1896 size_t first_block =
duke@435 1897 chunk_index / ParallelCompactData::BlocksPerChunk;
duke@435 1898 gclog_or_tty->print_cr("first_block " PTR_FORMAT
duke@435 1899 " _offset " PTR_FORMAT
duke@435 1900 "_first_is_start_bit %d",
duke@435 1901 first_block,
duke@435 1902 _summary_data.block(first_block)->raw_offset(),
duke@435 1903 _summary_data.block(first_block)->first_is_start_bit());
duke@435 1904 }
duke@435 1905 }
duke@435 1906 #endif
duke@435 1907 }
duke@435 1908 }
duke@435 1909 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
duke@435 1910 #endif // #if 0
duke@435 1911 }
duke@435 1912
duke@435 1913 // This method should contain all heap-specific policy for invoking a full
duke@435 1914 // collection. invoke_no_policy() will only attempt to compact the heap; it
duke@435 1915 // will do nothing further. Any policy for bailing out, scavenging before a
duke@435 1916 // full gc, or other specialized behavior needs to be added here.
duke@435 1917 //
duke@435 1918 // Note that this method should only be called from the vm_thread while at a
duke@435 1919 // safepoint.
duke@435 1920 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
duke@435 1921 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 1922 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
duke@435 1923 "should be in vm thread");
duke@435 1924 ParallelScavengeHeap* heap = gc_heap();
duke@435 1925 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1926 assert(!heap->is_gc_active(), "not reentrant");
duke@435 1927
duke@435 1928 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@435 1929
duke@435 1930 // Before each allocation/collection attempt, find out from the
duke@435 1931 // policy object if GCs are, on the whole, taking too long. If so,
duke@435 1932 // bail out without attempting a collection. The exceptions are
duke@435 1933 // for explicitly requested GCs.
duke@435 1934 if (!policy->gc_time_limit_exceeded() ||
duke@435 1935 GCCause::is_user_requested_gc(gc_cause) ||
duke@435 1936 GCCause::is_serviceability_requested_gc(gc_cause)) {
duke@435 1937 IsGCActiveMark mark;
duke@435 1938
duke@435 1939 if (ScavengeBeforeFullGC) {
duke@435 1940 PSScavenge::invoke_no_policy();
duke@435 1941 }
duke@435 1942
duke@435 1943 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
duke@435 1944 }
duke@435 1945 }
duke@435 1946
duke@435 1947 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
duke@435 1948 size_t addr_chunk_index = addr_to_chunk_idx(addr);
duke@435 1949 return chunk_index == addr_chunk_index;
duke@435 1950 }
duke@435 1951
duke@435 1952 bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
duke@435 1953 size_t block_index) {
duke@435 1954 size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
duke@435 1955 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
duke@435 1956
duke@435 1957 return (first_block_in_chunk <= block_index) &&
duke@435 1958 (block_index <= last_block_in_chunk);
duke@435 1959 }
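// Example: with 512-word chunks and 128-word blocks, BlocksPerChunk is 4,
// so chunk_contains_block(3, block_index) is true exactly for blocks 12
// through 15.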
duke@435 1960
duke@435 1961 // This method contains no policy. You should probably
duke@435 1962 // be calling invoke() instead.
duke@435 1963 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
duke@435 1964 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
duke@435 1965 assert(ref_processor() != NULL, "Sanity");
duke@435 1966
duke@435 1967 if (GC_locker::is_active()) {
duke@435 1968 return;
duke@435 1969 }
duke@435 1970
duke@435 1971 TimeStamp marking_start;
duke@435 1972 TimeStamp compaction_start;
duke@435 1973 TimeStamp collection_exit;
duke@435 1974
duke@435 1975 // "serial_CM" is needed until the parallel implementation
duke@435 1976 // of the move and update is done.
duke@435 1977 ParCompactionManager* serial_CM = new ParCompactionManager();
duke@435 1978 // Don't initialize more than once.
duke@435 1979 // serial_CM->initialize(&summary_data(), mark_bitmap());
duke@435 1980
duke@435 1981 ParallelScavengeHeap* heap = gc_heap();
duke@435 1982 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1983 PSYoungGen* young_gen = heap->young_gen();
duke@435 1984 PSOldGen* old_gen = heap->old_gen();
duke@435 1985 PSPermGen* perm_gen = heap->perm_gen();
duke@435 1986 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@435 1987
duke@435 1988 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
duke@435 1989
duke@435 1990 // Make sure data structures are sane, make the heap parsable, and do other
duke@435 1991 // miscellaneous bookkeeping.
duke@435 1992 PreGCValues pre_gc_values;
duke@435 1993 pre_compact(&pre_gc_values);
duke@435 1994
duke@435 1995 // Place after pre_compact() where the number of invocations is incremented.
duke@435 1996 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@435 1997
duke@435 1998 {
duke@435 1999 ResourceMark rm;
duke@435 2000 HandleMark hm;
duke@435 2001
duke@435 2002 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
duke@435 2003
duke@435 2004 // This is useful for debugging, but don't change the output
duke@435 2005 // the customer sees.
duke@435 2006 const char* gc_cause_str = "Full GC";
duke@435 2007 if (is_system_gc && PrintGCDetails) {
duke@435 2008 gc_cause_str = "Full GC (System)";
duke@435 2009 }
duke@435 2010 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 2011 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 2012 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
duke@435 2013 TraceCollectorStats tcs(counters());
duke@435 2014 TraceMemoryManagerStats tms(true /* Full GC */);
duke@435 2015
duke@435 2016 if (TraceGen1Time) accumulated_time()->start();
duke@435 2017
duke@435 2018 // Let the size policy know we're starting
duke@435 2019 size_policy->major_collection_begin();
duke@435 2020
duke@435 2021 // When collecting the permanent generation methodOops may be moving,
duke@435 2022 // so we either have to flush all bcp data or convert it into bci.
duke@435 2023 CodeCache::gc_prologue();
duke@435 2024 Threads::gc_prologue();
duke@435 2025
duke@435 2026 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 2027 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 2028
duke@435 2029 ref_processor()->enable_discovery();
duke@435 2030
duke@435 2031 bool marked_for_unloading = false;
duke@435 2032
duke@435 2033 marking_start.update();
duke@435 2034 marking_phase(serial_CM, maximum_heap_compaction);
duke@435 2035
duke@435 2036 #ifndef PRODUCT
duke@435 2037 if (TraceParallelOldGCMarkingPhase) {
duke@435 2038 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
duke@435 2039 "cas_by_another %d",
duke@435 2040 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
duke@435 2041 mark_bitmap()->cas_by_another());
duke@435 2042 }
duke@435 2043 #endif // #ifndef PRODUCT
duke@435 2044
duke@435 2045 #ifdef ASSERT
duke@435 2046 if (VerifyParallelOldWithMarkSweep &&
duke@435 2047 (PSParallelCompact::total_invocations() %
duke@435 2048 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2049 gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
duke@435 2050 if (PrintGCDetails && Verbose) {
duke@435 2051 gclog_or_tty->print_cr("mark_sweep_phase1:");
duke@435 2052 }
duke@435 2053 // Clear the discovered lists so that discovered objects
duke@435 2054 // don't look like they have been discovered twice.
duke@435 2055 ref_processor()->clear_discovered_references();
duke@435 2056
duke@435 2057 PSMarkSweep::allocate_stacks();
duke@435 2058 MemRegion mr = Universe::heap()->reserved_region();
duke@435 2059 PSMarkSweep::ref_processor()->enable_discovery();
duke@435 2060 PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction);
duke@435 2061 }
duke@435 2062 #endif
duke@435 2063
duke@435 2064 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
duke@435 2065 summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
duke@435 2066
duke@435 2067 #ifdef ASSERT
duke@435 2068 if (VerifyParallelOldWithMarkSweep &&
duke@435 2069 (PSParallelCompact::total_invocations() %
duke@435 2070 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2071 if (PrintGCDetails && Verbose) {
duke@435 2072 gclog_or_tty->print_cr("mark_sweep_phase2:");
duke@435 2073 }
duke@435 2074 PSMarkSweep::mark_sweep_phase2();
duke@435 2075 }
duke@435 2076 #endif
duke@435 2077
duke@435 2078 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
duke@435 2079 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
duke@435 2080
duke@435 2081 // adjust_roots() updates Universe::_intArrayKlassObj which is
duke@435 2082 // needed by the compaction for filling holes in the dense prefix.
duke@435 2083 adjust_roots();
duke@435 2084
duke@435 2085 #ifdef ASSERT
duke@435 2086 if (VerifyParallelOldWithMarkSweep &&
duke@435 2087 (PSParallelCompact::total_invocations() %
duke@435 2088 VerifyParallelOldWithMarkSweepInterval) == 0) {
duke@435 2089 // Do a separate verify phase so that the verify
duke@435 2090 // code can use the forwarding pointers to
duke@435 2091 // check the new pointer calculation. The restore_marks()
duke@435 2092 // has to be done before the real compact.
duke@435 2093 serial_CM->set_action(ParCompactionManager::VerifyUpdate);
duke@435 2094 compact_perm(serial_CM);
duke@435 2095 compact_serial(serial_CM);
duke@435 2096 serial_CM->set_action(ParCompactionManager::ResetObjects);
duke@435 2097 compact_perm(serial_CM);
duke@435 2098 compact_serial(serial_CM);
duke@435 2099 serial_CM->set_action(ParCompactionManager::UpdateAndCopy);
duke@435 2100
duke@435 2101 // For debugging only
duke@435 2102 PSMarkSweep::restore_marks();
duke@435 2103 PSMarkSweep::deallocate_stacks();
duke@435 2104 }
duke@435 2105 #endif
duke@435 2106
duke@435 2107 compaction_start.update();
duke@435 2108 // Does the perm gen always have to be done serially because
duke@435 2109 // klasses are used in the update of an object?
duke@435 2110 compact_perm(serial_CM);
duke@435 2111
duke@435 2112 if (UseParallelOldGCCompacting) {
duke@435 2113 compact();
duke@435 2114 } else {
duke@435 2115 compact_serial(serial_CM);
duke@435 2116 }
duke@435 2117
duke@435 2118 delete serial_CM;
duke@435 2119
duke@435 2120 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
duke@435 2121 // done before resizing.
duke@435 2122 post_compact();
duke@435 2123
duke@435 2124 // Let the size policy know we're done
duke@435 2125 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
duke@435 2126
duke@435 2127 if (UseAdaptiveSizePolicy) {
duke@435 2128 if (PrintAdaptiveSizePolicy) {
duke@435 2129 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@435 2130 gclog_or_tty->stamp();
duke@435 2131 gclog_or_tty->print_cr(" collection: %d ",
duke@435 2132 heap->total_collections());
duke@435 2133 if (Verbose) {
duke@435 2134 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
duke@435 2135 " perm_gen_capacity: %d ",
duke@435 2136 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@435 2137 perm_gen->capacity_in_bytes());
duke@435 2138 }
duke@435 2139 }
duke@435 2140
duke@435 2141 // Don't check if the size_policy is ready here. Let
duke@435 2142 // the size_policy check that internally.
duke@435 2143 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
duke@435 2144 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@435 2145 UseAdaptiveSizePolicyWithSystemGC)) {
duke@435 2146 // Calculate optimal free space amounts
duke@435 2147 assert(young_gen->max_size() >
duke@435 2148 young_gen->from_space()->capacity_in_bytes() +
duke@435 2149 young_gen->to_space()->capacity_in_bytes(),
duke@435 2150 "Sizes of space in young gen are out-of-bounds");
duke@435 2151 size_t max_eden_size = young_gen->max_size() -
duke@435 2152 young_gen->from_space()->capacity_in_bytes() -
duke@435 2153 young_gen->to_space()->capacity_in_bytes();
duke@435 2154 size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
duke@435 2155 young_gen->eden_space()->used_in_bytes(),
duke@435 2156 old_gen->used_in_bytes(),
duke@435 2157 perm_gen->used_in_bytes(),
duke@435 2158 young_gen->eden_space()->capacity_in_bytes(),
duke@435 2159 old_gen->max_gen_size(),
duke@435 2160 max_eden_size,
duke@435 2161 true /* full gc*/,
duke@435 2162 gc_cause);
duke@435 2163
duke@435 2164 heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
duke@435 2165
duke@435 2166 // Don't resize the young generation at a major collection. A
duke@435 2167 // desired young generation size may have been calculated but
duke@435 2168 // resizing the young generation complicates the code because the
duke@435 2169 // resizing of the old generation may have moved the boundary
duke@435 2170 // between the young generation and the old generation. Let the
duke@435 2171 // young generation resizing happen at the minor collections.
duke@435 2172 }
duke@435 2173 if (PrintAdaptiveSizePolicy) {
duke@435 2174 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@435 2175 heap->total_collections());
duke@435 2176 }
duke@435 2177 }
duke@435 2178
duke@435 2179 if (UsePerfData) {
duke@435 2180 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
duke@435 2181 counters->update_counters();
duke@435 2182 counters->update_old_capacity(old_gen->capacity_in_bytes());
duke@435 2183 counters->update_young_capacity(young_gen->capacity_in_bytes());
duke@435 2184 }
duke@435 2185
duke@435 2186 heap->resize_all_tlabs();
duke@435 2187
duke@435 2188 // We collected the perm gen, so we'll resize it here.
duke@435 2189 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
duke@435 2190
duke@435 2191 if (TraceGen1Time) accumulated_time()->stop();
duke@435 2192
duke@435 2193 if (PrintGC) {
duke@435 2194 if (PrintGCDetails) {
duke@435 2195 // No GC timestamp here. This is after GC so it would be confusing.
duke@435 2196 young_gen->print_used_change(pre_gc_values.young_gen_used());
duke@435 2197 old_gen->print_used_change(pre_gc_values.old_gen_used());
duke@435 2198 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 2199 // Print perm gen last (print_heap_change() excludes the perm gen).
duke@435 2200 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
duke@435 2201 } else {
duke@435 2202 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 2203 }
duke@435 2204 }
duke@435 2205
duke@435 2206 // Track memory usage and detect low memory
duke@435 2207 MemoryService::track_memory_usage();
duke@435 2208 heap->update_counters();
duke@435 2209
duke@435 2210 if (PrintGCDetails) {
duke@435 2211 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
duke@435 2212 if (size_policy->gc_time_limit_exceeded()) {
duke@435 2213 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
duke@435 2214 "of %d%%", GCTimeLimit);
duke@435 2215 } else {
duke@435 2216 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
duke@435 2217 "of %d%%", GCTimeLimit);
duke@435 2218 }
duke@435 2219 }
duke@435 2220 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
duke@435 2221 }
duke@435 2222 }
duke@435 2223
duke@435 2224 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 2225 HandleMark hm; // Discard invalid handles created during verification
duke@435 2226 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 2227 Universe::verify(false);
duke@435 2228 }
duke@435 2229
duke@435 2230 // Re-verify object start arrays
duke@435 2231 if (VerifyObjectStartArray &&
duke@435 2232 VerifyAfterGC) {
duke@435 2233 old_gen->verify_object_start_array();
duke@435 2234 perm_gen->verify_object_start_array();
duke@435 2235 }
duke@435 2236
duke@435 2237 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 2238
duke@435 2239 collection_exit.update();
duke@435 2240
duke@435 2241 if (PrintHeapAtGC) {
duke@435 2242 Universe::print_heap_after_gc();
duke@435 2243 }
duke@435 2244 if (PrintGCTaskTimeStamps) {
duke@435 2245 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
duke@435 2246 INT64_FORMAT,
duke@435 2247 marking_start.ticks(), compaction_start.ticks(),
duke@435 2248 collection_exit.ticks());
duke@435 2249 gc_task_manager()->print_task_time_stamps();
duke@435 2250 }
duke@435 2251 }
duke@435 2252
duke@435 2253 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 2254 PSYoungGen* young_gen,
duke@435 2255 PSOldGen* old_gen) {
duke@435 2256 MutableSpace* const eden_space = young_gen->eden_space();
duke@435 2257 assert(!eden_space->is_empty(), "eden must be non-empty");
duke@435 2258 assert(young_gen->virtual_space()->alignment() ==
duke@435 2259 old_gen->virtual_space()->alignment(), "alignments do not match");
duke@435 2260
duke@435 2261 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
duke@435 2262 return false;
duke@435 2263 }
duke@435 2264
duke@435 2265 // Both generations must be completely committed.
duke@435 2266 if (young_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 2267 return false;
duke@435 2268 }
duke@435 2269 if (old_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 2270 return false;
duke@435 2271 }
duke@435 2272
duke@435 2273 // Figure out how much to take from eden. Include the average amount promoted
duke@435 2274 // in the total; otherwise the next young gen GC will simply bail out to a
duke@435 2275 // full GC.
duke@435 2276 const size_t alignment = old_gen->virtual_space()->alignment();
duke@435 2277 const size_t eden_used = eden_space->used_in_bytes();
duke@435 2278 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
duke@435 2279 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
duke@435 2280 const size_t eden_capacity = eden_space->capacity_in_bytes();
duke@435 2281
duke@435 2282 if (absorb_size >= eden_capacity) {
duke@435 2283 return false; // Must leave some space in eden.
duke@435 2284 }
duke@435 2285
duke@435 2286 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
duke@435 2287 if (new_young_size < young_gen->min_gen_size()) {
duke@435 2288 return false; // Respect young gen minimum size.
duke@435 2289 }
duke@435 2290
duke@435 2291 if (TraceAdaptiveGCBoundary && Verbose) {
duke@435 2292 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
duke@435 2293 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
duke@435 2294 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
duke@435 2295 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
duke@435 2296 absorb_size / K,
duke@435 2297 eden_capacity / K, (eden_capacity - absorb_size) / K,
duke@435 2298 young_gen->from_space()->used_in_bytes() / K,
duke@435 2299 young_gen->to_space()->used_in_bytes() / K,
duke@435 2300 young_gen->capacity_in_bytes() / K, new_young_size / K);
duke@435 2301 }
duke@435 2302
duke@435 2303 // Fill the unused part of the old gen.
duke@435 2304 MutableSpace* const old_space = old_gen->object_space();
duke@435 2305 MemRegion old_gen_unused(old_space->top(), old_space->end());
duke@435 2306 if (!old_gen_unused.is_empty()) {
duke@435 2307 SharedHeap::fill_region_with_object(old_gen_unused);
duke@435 2308 }
duke@435 2309
duke@435 2310 // Take the live data from eden and set both top and end in the old gen to
duke@435 2311 // eden top. (Need to set end because reset_after_change() mangles the region
duke@435 2312 // from end to virtual_space->high() in debug builds).
duke@435 2313 HeapWord* const new_top = eden_space->top();
duke@435 2314 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
duke@435 2315 absorb_size);
duke@435 2316 young_gen->reset_after_change();
duke@435 2317 old_space->set_top(new_top);
duke@435 2318 old_space->set_end(new_top);
duke@435 2319 old_gen->reset_after_change();
duke@435 2320
duke@435 2321 // Update the object start array for the filler object and the data from eden.
duke@435 2322 ObjectStartArray* const start_array = old_gen->start_array();
duke@435 2323 HeapWord* const start = old_gen_unused.start();
duke@435 2324 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
duke@435 2325 start_array->allocate_block(addr);
duke@435 2326 }
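// Illustration (hypothetical addresses): the loop above walks object by
// object using each header's size, e.g. filler @ 0x1000 of size 0x200
// words -> allocate_block(0x1000), next object @ 0x1200, and so on up to
// new_top. Registering every object start lets later card scans find the
// first object header in a card without scanning from the space bottom.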
duke@435 2327
duke@435 2328 // Could update the promoted average here, but it is not typically updated at
duke@435 2329 // full GCs and the value to use is unclear. Something like
duke@435 2330 //
duke@435 2331 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
duke@435 2332
duke@435 2333 size_policy->set_bytes_absorbed_from_eden(absorb_size);
duke@435 2334 return true;
duke@435 2335 }
duke@435 2336
duke@435 2337 GCTaskManager* const PSParallelCompact::gc_task_manager() {
duke@435 2338 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
duke@435 2339 "shouldn't return NULL");
duke@435 2340 return ParallelScavengeHeap::gc_task_manager();
duke@435 2341 }
duke@435 2342
duke@435 2343 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
duke@435 2344 bool maximum_heap_compaction) {
duke@435 2345 // Recursively traverse all live objects and mark them
duke@435 2346 EventMark m("1 mark object");
duke@435 2347 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
duke@435 2348
duke@435 2349 ParallelScavengeHeap* heap = gc_heap();
duke@435 2350 uint parallel_gc_threads = heap->gc_task_manager()->workers();
duke@435 2351 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
duke@435 2352 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2353
duke@435 2354 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 2355 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
duke@435 2356
duke@435 2357 {
duke@435 2358 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
duke@435 2359
duke@435 2360 GCTaskQueue* q = GCTaskQueue::create();
duke@435 2361
duke@435 2362 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
duke@435 2363 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
duke@435 2364 // We scan the thread roots in parallel
duke@435 2365 Threads::create_thread_roots_marking_tasks(q);
duke@435 2366 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
duke@435 2367 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
duke@435 2368 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
duke@435 2369 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
duke@435 2370 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
duke@435 2371 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
duke@435 2372
duke@435 2373 if (parallel_gc_threads > 1) {
duke@435 2374 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 2375 q->enqueue(new StealMarkingTask(&terminator));
duke@435 2376 }
duke@435 2377 }
duke@435 2378
duke@435 2379 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2380 q->enqueue(fin);
duke@435 2381
duke@435 2382 gc_task_manager()->add_list(q);
duke@435 2383
duke@435 2384 fin->wait_for();
duke@435 2385
duke@435 2386 // We have to release the barrier tasks!
duke@435 2387 WaitForBarrierGCTask::destroy(fin);
duke@435 2388 }
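// The task-queue pattern above recurs in the compaction phase below: seed
// the queue with work, add per-thread stealing tasks so idle workers stay
// busy until termination, then enqueue a WaitForBarrierGCTask that the
// calling thread blocks on (fin->wait_for()) until all workers finish.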
duke@435 2389
duke@435 2390 // Process reference objects found during marking
duke@435 2391 {
duke@435 2392 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
duke@435 2393 ReferencePolicy *soft_ref_policy;
duke@435 2394 if (maximum_heap_compaction) {
duke@435 2395 soft_ref_policy = new AlwaysClearPolicy();
duke@435 2396 } else {
duke@435 2397 #ifdef COMPILER2
duke@435 2398 soft_ref_policy = new LRUMaxHeapPolicy();
duke@435 2399 #else
duke@435 2400 soft_ref_policy = new LRUCurrentHeapPolicy();
duke@435 2401 #endif // COMPILER2
duke@435 2402 }
duke@435 2403 assert(soft_ref_policy != NULL, "No soft reference policy");
duke@435 2404 if (ref_processor()->processing_is_mt()) {
duke@435 2405 RefProcTaskExecutor task_executor;
duke@435 2406 ref_processor()->process_discovered_references(
duke@435 2407 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
duke@435 2408 &follow_stack_closure, &task_executor);
duke@435 2409 } else {
duke@435 2410 ref_processor()->process_discovered_references(
duke@435 2411 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
duke@435 2412 &follow_stack_closure, NULL);
duke@435 2413 }
duke@435 2414 }
duke@435 2415
duke@435 2416 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
duke@435 2417 // Follow system dictionary roots and unload classes.
duke@435 2418 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
duke@435 2419
duke@435 2420 // Follow code cache roots.
duke@435 2421 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
duke@435 2422 purged_class);
duke@435 2423 follow_stack(cm); // Flush marking stack.
duke@435 2424
duke@435 2425 // Update subklass/sibling/implementor links of live klasses
duke@435 2426 // revisit_klass_stack is used in follow_weak_klass_links().
duke@435 2427 follow_weak_klass_links(cm);
duke@435 2428
duke@435 2429 // Visit symbol and interned string tables and delete unmarked oops
duke@435 2430 SymbolTable::unlink(is_alive_closure());
duke@435 2431 StringTable::unlink(is_alive_closure());
duke@435 2432
duke@435 2433 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
duke@435 2434 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
duke@435 2435 }
duke@435 2436
duke@435 2437 // This should be moved to the shared markSweep code!
duke@435 2438 class PSAlwaysTrueClosure: public BoolObjectClosure {
duke@435 2439 public:
duke@435 2440 void do_object(oop p) { ShouldNotReachHere(); }
duke@435 2441 bool do_object_b(oop p) { return true; }
duke@435 2442 };
duke@435 2443 static PSAlwaysTrueClosure always_true;
duke@435 2444
duke@435 2445 void PSParallelCompact::adjust_roots() {
duke@435 2446 // Adjust the pointers to reflect the new locations
duke@435 2447 EventMark m("3 adjust roots");
duke@435 2448 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
duke@435 2449
duke@435 2450 // General strong roots.
duke@435 2451 Universe::oops_do(adjust_root_pointer_closure());
duke@435 2452 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
duke@435 2453 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
duke@435 2454 Threads::oops_do(adjust_root_pointer_closure());
duke@435 2455 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
duke@435 2456 FlatProfiler::oops_do(adjust_root_pointer_closure());
duke@435 2457 Management::oops_do(adjust_root_pointer_closure());
duke@435 2458 JvmtiExport::oops_do(adjust_root_pointer_closure());
duke@435 2459 // SO_AllClasses
duke@435 2460 SystemDictionary::oops_do(adjust_root_pointer_closure());
duke@435 2461 vmSymbols::oops_do(adjust_root_pointer_closure());
duke@435 2462
duke@435 2463 // Now adjust pointers in remaining weak roots. (All of which should
duke@435 2464 // have been cleared if they pointed to non-surviving objects.)
duke@435 2465 // Global (weak) JNI handles
duke@435 2466 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
duke@435 2467
duke@435 2468 CodeCache::oops_do(adjust_pointer_closure());
duke@435 2469 SymbolTable::oops_do(adjust_root_pointer_closure());
duke@435 2470 StringTable::oops_do(adjust_root_pointer_closure());
duke@435 2471 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 2472 // Roots were visited so references into the young gen in roots
duke@435 2473 // may have been scanned. Process them also.
duke@435 2474 // Should the reference processor have a span that excludes
duke@435 2475 // young gen objects?
duke@435 2476 PSScavenge::reference_processor()->weak_oops_do(
duke@435 2477 adjust_root_pointer_closure());
duke@435 2478 }
duke@435 2479
duke@435 2480 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
duke@435 2481 EventMark m("4 compact perm");
duke@435 2482 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
duke@435 2483 // trace("4");
duke@435 2484
duke@435 2485 gc_heap()->perm_gen()->start_array()->reset();
duke@435 2486 move_and_update(cm, perm_space_id);
duke@435 2487 }
duke@435 2488
duke@435 2489 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
duke@435 2490 uint parallel_gc_threads) {
duke@435 2491 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
duke@435 2492
duke@435 2493 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
duke@435 2494 for (unsigned int j = 0; j < task_count; j++) {
duke@435 2495 q->enqueue(new DrainStacksCompactionTask());
duke@435 2496 }
duke@435 2497
duke@435 2498 // Find all chunks that are available (can be filled immediately) and
duke@435 2499 // distribute them to the thread stacks. The iteration is done in reverse
duke@435 2500 // order (high to low) so the chunks will be removed in ascending order.
duke@435 2501
duke@435 2502 const ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2503
duke@435 2504 size_t fillable_chunks = 0; // A count for diagnostic purposes.
duke@435 2505 unsigned int which = 0; // The worker thread number.
duke@435 2506
duke@435 2507 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
duke@435 2508 SpaceInfo* const space_info = _space_info + id;
duke@435 2509 MutableSpace* const space = space_info->space();
duke@435 2510 HeapWord* const new_top = space_info->new_top();
duke@435 2511
duke@435 2512 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
duke@435 2513 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
duke@435 2514 assert(end_chunk > 0, "perm gen cannot be empty");
duke@435 2515
duke@435 2516 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
duke@435 2517 if (sd.chunk(cur)->claim_unsafe()) {
duke@435 2518 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
duke@435 2519 cm->save_for_processing(cur);
duke@435 2520
duke@435 2521 if (TraceParallelOldGCCompactionPhase && Verbose) {
duke@435 2522 const size_t count_mod_8 = fillable_chunks & 7;
duke@435 2523 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
duke@435 2524 gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
duke@435 2525 if (count_mod_8 == 7) gclog_or_tty->cr();
duke@435 2526 }
duke@435 2527
duke@435 2528 NOT_PRODUCT(++fillable_chunks;)
duke@435 2529
duke@435 2530 // Assign chunks to threads in round-robin fashion.
duke@435 2531 if (++which == task_count) {
duke@435 2532 which = 0;
duke@435 2533 }
duke@435 2534 }
duke@435 2535 }
duke@435 2536 }
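// Illustration (hypothetical): with task_count == 4 and chunks claimed in
// the order 17, 16, 15, 14, 13, ..., the round-robin assignment above puts
// them on worker stacks 0, 1, 2, 3, 0, ..., so each DrainStacksCompactionTask
// starts with roughly the same number of fillable chunks.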
duke@435 2537
duke@435 2538 if (TraceParallelOldGCCompactionPhase) {
duke@435 2539 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
duke@435 2540 gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable chunks", fillable_chunks);
duke@435 2541 }
duke@435 2542 }
duke@435 2543
duke@435 2544 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
duke@435 2545
duke@435 2546 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 2547 uint parallel_gc_threads) {
duke@435 2548 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
duke@435 2549
duke@435 2550 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2551
duke@435 2552 // Iterate over all the spaces adding tasks for updating
duke@435 2553 // chunks in the dense prefix. Assume that 1 gc thread
duke@435 2554 // will work on opening the gaps and the remaining gc threads
duke@435 2555 // will work on the dense prefix.
duke@435 2556 SpaceId space_id = old_space_id;
duke@435 2557 while (space_id != last_space_id) {
duke@435 2558 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
duke@435 2559 const MutableSpace* const space = _space_info[space_id].space();
duke@435 2560
duke@435 2561 if (dense_prefix_end == space->bottom()) {
duke@435 2562 // There is no dense prefix for this space.
duke@435 2563 space_id = next_compaction_space_id(space_id);
duke@435 2564 continue;
duke@435 2565 }
duke@435 2566
duke@435 2567 // The dense prefix is before this chunk.
duke@435 2568 size_t chunk_index_end_dense_prefix =
duke@435 2569 sd.addr_to_chunk_idx(dense_prefix_end);
duke@435 2570 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
duke@435 2571 assert(dense_prefix_end == space->end() ||
duke@435 2572 dense_prefix_cp->available() ||
duke@435 2573 dense_prefix_cp->claimed(),
duke@435 2574 "The chunk after the dense prefix should always be ready to fill");
duke@435 2575
duke@435 2576 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
duke@435 2577
duke@435 2578 // Is there dense prefix work?
duke@435 2579 size_t total_dense_prefix_chunks =
duke@435 2580 chunk_index_end_dense_prefix - chunk_index_start;
duke@435 2581 // How many chunks of the dense prefix should be given to
duke@435 2582 // each thread?
duke@435 2583 if (total_dense_prefix_chunks > 0) {
duke@435 2584 uint tasks_for_dense_prefix = 1;
duke@435 2585 if (UseParallelDensePrefixUpdate) {
duke@435 2586 if (total_dense_prefix_chunks <=
duke@435 2587 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
duke@435 2588 // Don't over partition. This assumes that
duke@435 2589 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
duke@435 2590 // so there are not many chunks to process.
duke@435 2591 tasks_for_dense_prefix = parallel_gc_threads;
duke@435 2592 } else {
duke@435 2593 // Over partition
duke@435 2594 tasks_for_dense_prefix = parallel_gc_threads *
duke@435 2595 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
duke@435 2596 }
duke@435 2597 }
duke@435 2598 size_t chunks_per_thread = total_dense_prefix_chunks /
duke@435 2599 tasks_for_dense_prefix;
duke@435 2600 // Give each thread at least 1 chunk.
duke@435 2601 if (chunks_per_thread == 0) {
duke@435 2602 chunks_per_thread = 1;
duke@435 2603 }
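// Worked example (hypothetical values): with parallel_gc_threads == 8 and
// total_dense_prefix_chunks == 100, 100 > 8 * 4 so the prefix is over-
// partitioned into 32 tasks; chunks_per_thread == 100 / 32 == 3. The loop
// below then covers 32 * 3 == 96 chunks, and the remaining 4 are picked up
// by the final UpdateDensePrefixTask enqueued after the loop.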
duke@435 2604
duke@435 2605 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
duke@435 2606 if (chunk_index_start >= chunk_index_end_dense_prefix) {
duke@435 2607 break;
duke@435 2608 }
duke@435 2609 // chunk_index_end is not processed
duke@435 2610 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
duke@435 2611 chunk_index_end_dense_prefix);
duke@435 2612 q->enqueue(new UpdateDensePrefixTask(
duke@435 2613 space_id,
duke@435 2614 chunk_index_start,
duke@435 2615 chunk_index_end));
duke@435 2616 chunk_index_start = chunk_index_end;
duke@435 2617 }
duke@435 2618 }
duke@435 2619 // This gets any part of the dense prefix that did not
duke@435 2620 // fit evenly.
duke@435 2621 if (chunk_index_start < chunk_index_end_dense_prefix) {
duke@435 2622 q->enqueue(new UpdateDensePrefixTask(
duke@435 2623 space_id,
duke@435 2624 chunk_index_start,
duke@435 2625 chunk_index_end_dense_prefix));
duke@435 2626 }
duke@435 2627 space_id = next_compaction_space_id(space_id);
duke@435 2628 } // End tasks for dense prefix
duke@435 2629 }
duke@435 2630
duke@435 2631 void PSParallelCompact::enqueue_chunk_stealing_tasks(
duke@435 2632 GCTaskQueue* q,
duke@435 2633 ParallelTaskTerminator* terminator_ptr,
duke@435 2634 uint parallel_gc_threads) {
duke@435 2635 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
duke@435 2636
duke@435 2637 // Once a thread has drained its stack, it should try to steal chunks from
duke@435 2638 // other threads.
duke@435 2639 if (parallel_gc_threads > 1) {
duke@435 2640 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 2641 q->enqueue(new StealChunkCompactionTask(terminator_ptr));
duke@435 2642 }
duke@435 2643 }
duke@435 2644 }
duke@435 2645
duke@435 2646 void PSParallelCompact::compact() {
duke@435 2647 EventMark m("5 compact");
duke@435 2648 // trace("5");
duke@435 2649 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
duke@435 2650
duke@435 2651 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2652 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2653 PSOldGen* old_gen = heap->old_gen();
duke@435 2654 old_gen->start_array()->reset();
duke@435 2655 uint parallel_gc_threads = heap->gc_task_manager()->workers();
duke@435 2656 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
duke@435 2657 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2658
duke@435 2659 GCTaskQueue* q = GCTaskQueue::create();
duke@435 2660 enqueue_chunk_draining_tasks(q, parallel_gc_threads);
duke@435 2661 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
duke@435 2662 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);
duke@435 2663
duke@435 2664 {
duke@435 2665 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
duke@435 2666
duke@435 2667 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2668 q->enqueue(fin);
duke@435 2669
duke@435 2670 gc_task_manager()->add_list(q);
duke@435 2671
duke@435 2672 fin->wait_for();
duke@435 2673
duke@435 2674 // We have to release the barrier tasks!
duke@435 2675 WaitForBarrierGCTask::destroy(fin);
duke@435 2676
duke@435 2677 #ifdef ASSERT
duke@435 2678 // Verify that all chunks have been processed before the deferred updates.
duke@435 2679 // Note that perm_space_id is skipped; this type of verification is not
duke@435 2680 // valid until the perm gen is compacted by chunks.
duke@435 2681 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2682 verify_complete(SpaceId(id));
duke@435 2683 }
duke@435 2684 #endif
duke@435 2685 }
duke@435 2686
duke@435 2687 {
duke@435 2688 // Update the deferred objects, if any. Any compaction manager can be used.
duke@435 2689 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
duke@435 2690 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
duke@435 2691 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2692 update_deferred_objects(cm, SpaceId(id));
duke@435 2693 }
duke@435 2694 }
duke@435 2695 }
duke@435 2696
duke@435 2697 #ifdef ASSERT
duke@435 2698 void PSParallelCompact::verify_complete(SpaceId space_id) {
duke@435 2699 // All Chunks between space bottom() and new_top() should be marked as filled
duke@435 2700 // and all Chunks between new_top() and top() should be available (i.e.,
duke@435 2701 // should have been emptied).
duke@435 2702 ParallelCompactData& sd = summary_data();
duke@435 2703 SpaceInfo si = _space_info[space_id];
duke@435 2704 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
duke@435 2705 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
duke@435 2706 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
duke@435 2707 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
duke@435 2708 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);
duke@435 2709
duke@435 2710 bool issued_a_warning = false;
duke@435 2711
duke@435 2712 size_t cur_chunk;
duke@435 2713 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
duke@435 2714 const ChunkData* const c = sd.chunk(cur_chunk);
duke@435 2715 if (!c->completed()) {
duke@435 2716 warning("chunk " SIZE_FORMAT " not filled: "
duke@435 2717 "destination_count=" SIZE_FORMAT,
duke@435 2718 cur_chunk, c->destination_count());
duke@435 2719 issued_a_warning = true;
duke@435 2720 }
duke@435 2721 }
duke@435 2722
duke@435 2723 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
duke@435 2724 const ChunkData* const c = sd.chunk(cur_chunk);
duke@435 2725 if (!c->available()) {
duke@435 2726 warning("chunk " SIZE_FORMAT " not empty: "
duke@435 2727 "destination_count=" SIZE_FORMAT,
duke@435 2728 cur_chunk, c->destination_count());
duke@435 2729 issued_a_warning = true;
duke@435 2730 }
duke@435 2731 }
duke@435 2732
duke@435 2733 if (issued_a_warning) {
duke@435 2734 print_chunk_ranges();
duke@435 2735 }
duke@435 2736 }
duke@435 2737 #endif // #ifdef ASSERT
duke@435 2738
duke@435 2739 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
duke@435 2740 EventMark m("5 compact serial");
duke@435 2741 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
duke@435 2742
duke@435 2743 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2744 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2745
duke@435 2746 PSYoungGen* young_gen = heap->young_gen();
duke@435 2747 PSOldGen* old_gen = heap->old_gen();
duke@435 2748
duke@435 2749 old_gen->start_array()->reset();
duke@435 2750 old_gen->move_and_update(cm);
duke@435 2751 young_gen->move_and_update(cm);
duke@435 2752 }
duke@435 2753
duke@435 2754 void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
duke@435 2755 assert(!Universe::heap()->is_in_reserved(p),
duke@435 2756 "roots shouldn't be things within the heap");
duke@435 2757 #ifdef VALIDATE_MARK_SWEEP
duke@435 2758 if (ValidateMarkSweep) {
duke@435 2759 guarantee(!_root_refs_stack->contains(p), "should only be in here once");
duke@435 2760 _root_refs_stack->push(p);
duke@435 2761 }
duke@435 2762 #endif
duke@435 2763 oop m = *p;
duke@435 2764 if (m != NULL && mark_bitmap()->is_unmarked(m)) {
duke@435 2765 if (mark_obj(m)) {
duke@435 2766 m->follow_contents(cm); // Follow contents of the marked object
duke@435 2767 }
duke@435 2768 }
duke@435 2769 follow_stack(cm);
duke@435 2770 }
duke@435 2771
duke@435 2772 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
duke@435 2773 while (!cm->overflow_stack()->is_empty()) {
duke@435 2774 oop obj = cm->overflow_stack()->pop();
duke@435 2775 obj->follow_contents(cm);
duke@435 2776 }
duke@435 2777
duke@435 2778 oop obj;
duke@435 2779 // Note: pop_local() takes obj by reference and fills it in.
duke@435 2780 while (cm->marking_stack()->pop_local(obj)) {
duke@435 2781 // It would be nice to assert about the type of objects we might
duke@435 2782 // pop, but they can come from anywhere, unfortunately.
duke@435 2783 obj->follow_contents(cm);
duke@435 2784 }
duke@435 2785 }
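// Note: two structures are drained here. The growable overflow stack
// absorbs entries that did not fit in the fixed-size marking stack, and
// the marking stack itself is drained via pop_local(); both must be empty
// before marking can terminate (see the asserts at the end of
// marking_phase()).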
duke@435 2786
duke@435 2787 void
duke@435 2788 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
duke@435 2789 // All klasses on the revisit stack are marked at this point.
duke@435 2790 // Update and follow all subklass, sibling and implementor links.
duke@435 2791 for (uint i = 0; i < ParallelGCThreads+1; i++) {
duke@435 2792 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
duke@435 2793 KeepAliveClosure keep_alive_closure(cm);
duke@435 2794 for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
duke@435 2795 cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
duke@435 2796 is_alive_closure(),
duke@435 2797 &keep_alive_closure);
duke@435 2798 }
duke@435 2799 follow_stack(cm);
duke@435 2800 }
duke@435 2801 }
duke@435 2802
duke@435 2803 void
duke@435 2804 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
duke@435 2805 cm->revisit_klass_stack()->push(k);
duke@435 2806 }
duke@435 2807
duke@435 2808 #ifdef VALIDATE_MARK_SWEEP
duke@435 2809
duke@435 2810 void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
duke@435 2811 if (!ValidateMarkSweep)
duke@435 2812 return;
duke@435 2813
duke@435 2814 if (!isroot) {
duke@435 2815 if (_pointer_tracking) {
duke@435 2816 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
duke@435 2817 _adjusted_pointers->remove(p);
duke@435 2818 }
duke@435 2819 } else {
duke@435 2820 ptrdiff_t index = _root_refs_stack->find(p);
duke@435 2821 if (index != -1) {
duke@435 2822 int l = _root_refs_stack->length();
duke@435 2823 if (l > 0 && l - 1 != index) {
duke@435 2824 oop* last = _root_refs_stack->pop();
duke@435 2825 assert(last != p, "should be different");
duke@435 2826 _root_refs_stack->at_put(index, last);
duke@435 2827 } else {
duke@435 2828 _root_refs_stack->remove(p);
duke@435 2829 }
duke@435 2830 }
duke@435 2831 }
duke@435 2832 }
duke@435 2833
duke@435 2834
duke@435 2835 void PSParallelCompact::check_adjust_pointer(oop* p) {
duke@435 2836 _adjusted_pointers->push(p);
duke@435 2837 }
duke@435 2838
duke@435 2839
duke@435 2840 class AdjusterTracker: public OopClosure {
duke@435 2841 public:
duke@435 2842 AdjusterTracker() {}
duke@435 2843 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
duke@435 2844 };
duke@435 2845
duke@435 2846
duke@435 2847 void PSParallelCompact::track_interior_pointers(oop obj) {
duke@435 2848 if (ValidateMarkSweep) {
duke@435 2849 _adjusted_pointers->clear();
duke@435 2850 _pointer_tracking = true;
duke@435 2851
duke@435 2852 AdjusterTracker checker;
duke@435 2853 obj->oop_iterate(&checker);
duke@435 2854 }
duke@435 2855 }
duke@435 2856
duke@435 2857
duke@435 2858 void PSParallelCompact::check_interior_pointers() {
duke@435 2859 if (ValidateMarkSweep) {
duke@435 2860 _pointer_tracking = false;
duke@435 2861 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
duke@435 2862 }
duke@435 2863 }
duke@435 2864
duke@435 2865
duke@435 2866 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
duke@435 2867 if (ValidateMarkSweep) {
duke@435 2868 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
duke@435 2869 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
duke@435 2870 }
duke@435 2871 }
duke@435 2872
duke@435 2873
duke@435 2874 void PSParallelCompact::register_live_oop(oop p, size_t size) {
duke@435 2875 if (ValidateMarkSweep) {
duke@435 2876 _live_oops->push(p);
duke@435 2877 _live_oops_size->push(size);
duke@435 2878 _live_oops_index++;
duke@435 2879 }
duke@435 2880 }
duke@435 2881
duke@435 2882 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
duke@435 2883 if (ValidateMarkSweep) {
duke@435 2884 oop obj = _live_oops->at((int)_live_oops_index);
duke@435 2885 guarantee(obj == p, "should be the same object");
duke@435 2886 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
duke@435 2887 _live_oops_index++;
duke@435 2888 }
duke@435 2889 }
duke@435 2890
duke@435 2891 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
duke@435 2892 HeapWord* compaction_top) {
duke@435 2893 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
duke@435 2894 "should be moved to forwarded location");
duke@435 2895 if (ValidateMarkSweep) {
duke@435 2896 PSParallelCompact::validate_live_oop(oop(q), size);
duke@435 2897 _live_oops_moved_to->push(oop(compaction_top));
duke@435 2898 }
duke@435 2899 if (RecordMarkSweepCompaction) {
duke@435 2900 _cur_gc_live_oops->push(q);
duke@435 2901 _cur_gc_live_oops_moved_to->push(compaction_top);
duke@435 2902 _cur_gc_live_oops_size->push(size);
duke@435 2903 }
duke@435 2904 }
duke@435 2905
duke@435 2906
duke@435 2907 void PSParallelCompact::compaction_complete() {
duke@435 2908 if (RecordMarkSweepCompaction) {
duke@435 2909 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
duke@435 2910 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
duke@435 2911 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
duke@435 2912
duke@435 2913 _cur_gc_live_oops = _last_gc_live_oops;
duke@435 2914 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
duke@435 2915 _cur_gc_live_oops_size = _last_gc_live_oops_size;
duke@435 2916 _last_gc_live_oops = _tmp_live_oops;
duke@435 2917 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
duke@435 2918 _last_gc_live_oops_size = _tmp_live_oops_size;
duke@435 2919 }
duke@435 2920 }
duke@435 2921
duke@435 2922
duke@435 2923 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
duke@435 2924 if (!RecordMarkSweepCompaction) {
duke@435 2925 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
duke@435 2926 return;
duke@435 2927 }
duke@435 2928
duke@435 2929 if (_last_gc_live_oops == NULL) {
duke@435 2930 tty->print_cr("No compaction information gathered yet");
duke@435 2931 return;
duke@435 2932 }
duke@435 2933
duke@435 2934 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
duke@435 2935 HeapWord* old_oop = _last_gc_live_oops->at(i);
duke@435 2936 size_t sz = _last_gc_live_oops_size->at(i);
duke@435 2937 if (old_oop <= q && q < (old_oop + sz)) {
duke@435 2938 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
duke@435 2939 size_t offset = (q - old_oop);
duke@435 2940 tty->print_cr("Address " PTR_FORMAT, q);
duke@435 2941 tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
duke@435 2942 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
duke@435 2943 return;
duke@435 2944 }
duke@435 2945 }
duke@435 2946
duke@435 2947 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
duke@435 2948 }
duke@435 2949 #endif //VALIDATE_MARK_SWEEP
duke@435 2950
duke@435 2951 void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
duke@435 2952 oop obj = *p;
duke@435 2953 VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
duke@435 2954 if (obj != NULL) {
duke@435 2955 oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
duke@435 2956 assert(new_pointer != NULL || // is forwarding ptr?
duke@435 2957 obj->is_shared(), // never forwarded?
duke@435 2958 "should have a new location");
duke@435 2959 // Just always do the update unconditionally?
duke@435 2960 if (new_pointer != NULL) {
duke@435 2961 *p = new_pointer;
duke@435 2962 assert(Universe::heap()->is_in_reserved(new_pointer),
duke@435 2963 "should be in object space");
duke@435 2964 VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
duke@435 2965 }
duke@435 2966 }
duke@435 2967 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
duke@435 2968 }
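// Note (from the assert above): calc_new_pointer() may return NULL only for
// objects that are never forwarded, i.e. shared objects (is_shared()),
// which do not move. For everything else the slot is overwritten with the
// object's new location.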
duke@435 2969
duke@435 2970 // Update interior oops in the range of chunks [beg_chunk, end_chunk).
duke@435 2971 void
duke@435 2972 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 2973 SpaceId space_id,
duke@435 2974 size_t beg_chunk,
duke@435 2975 size_t end_chunk) {
duke@435 2976 ParallelCompactData& sd = summary_data();
duke@435 2977 ParMarkBitMap* const mbm = mark_bitmap();
duke@435 2978
duke@435 2979 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
duke@435 2980 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
duke@435 2981 assert(beg_chunk <= end_chunk, "bad chunk range");
duke@435 2982 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
duke@435 2983
duke@435 2984 #ifdef ASSERT
duke@435 2985 // Claim the chunks to avoid triggering an assert when they are marked as
duke@435 2986 // filled.
duke@435 2987 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
duke@435 2988 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
duke@435 2989 }
duke@435 2990 #endif // #ifdef ASSERT
duke@435 2991
duke@435 2992 if (beg_addr != space(space_id)->bottom()) {
duke@435 2993 // Find the first live object or block of dead space that *starts* in this
duke@435 2994 // range of chunks. If a partial object crosses onto the chunk, skip it; it
duke@435 2995 // will be marked for 'deferred update' when the object head is processed.
duke@435 2996 // If dead space crosses onto the chunk, it is also skipped; it will be
duke@435 2997 // filled when the prior chunk is processed. If neither of those apply, the
duke@435 2998 // first word in the chunk is the start of a live object or dead space.
duke@435 2999 assert(beg_addr > space(space_id)->bottom(), "sanity");
duke@435 3000 const ChunkData* const cp = sd.chunk(beg_chunk);
duke@435 3001 if (cp->partial_obj_size() != 0) {
duke@435 3002 beg_addr = sd.partial_obj_end(beg_chunk);
duke@435 3003 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
duke@435 3004 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
duke@435 3005 }
duke@435 3006 }
duke@435 3007
duke@435 3008 if (beg_addr < end_addr) {
duke@435 3009 // A live object or block of dead space starts in this range of Chunks.
duke@435 3010 HeapWord* const dense_prefix_end = dense_prefix(space_id);
duke@435 3011
duke@435 3012 // Create closures and iterate.
duke@435 3013 UpdateOnlyClosure update_closure(mbm, cm, space_id);
duke@435 3014 FillClosure fill_closure(cm, space_id);
duke@435 3015 ParMarkBitMap::IterationStatus status;
duke@435 3016 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
duke@435 3017 dense_prefix_end);
duke@435 3018 if (status == ParMarkBitMap::incomplete) {
duke@435 3019 update_closure.do_addr(update_closure.source());
duke@435 3020 }
duke@435 3021 }
duke@435 3022
duke@435 3023 // Mark the chunks as filled.
duke@435 3024 ChunkData* const beg_cp = sd.chunk(beg_chunk);
duke@435 3025 ChunkData* const end_cp = sd.chunk(end_chunk);
duke@435 3026 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
duke@435 3027 cp->set_completed();
duke@435 3028 }
duke@435 3029 }
duke@435 3030
duke@435 3031 // Return the SpaceId for the space containing addr. If addr is not in the
duke@435 3032 // heap, last_space_id is returned. In debug builds the address is expected
duke@435 3033 // to be in the heap, and this is asserted.
duke@435 3034 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
duke@435 3035 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
duke@435 3036
duke@435 3037 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 3038 if (_space_info[id].space()->contains(addr)) {
duke@435 3039 return SpaceId(id);
duke@435 3040 }
duke@435 3041 }
duke@435 3042
duke@435 3043 assert(false, "no space contains the addr");
duke@435 3044 return last_space_id;
duke@435 3045 }
duke@435 3046
duke@435 3047 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
duke@435 3048 SpaceId id) {
duke@435 3049 assert(id < last_space_id, "bad space id");
duke@435 3050
duke@435 3051 ParallelCompactData& sd = summary_data();
duke@435 3052 const SpaceInfo* const space_info = _space_info + id;
duke@435 3053 ObjectStartArray* const start_array = space_info->start_array();
duke@435 3054
duke@435 3055 const MutableSpace* const space = space_info->space();
duke@435 3056 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
duke@435 3057 HeapWord* const beg_addr = space_info->dense_prefix();
duke@435 3058 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());
duke@435 3059
duke@435 3060 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
duke@435 3061 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
duke@435 3062 const ChunkData* cur_chunk;
duke@435 3063 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
duke@435 3064 HeapWord* const addr = cur_chunk->deferred_obj_addr();
duke@435 3065 if (addr != NULL) {
duke@435 3066 if (start_array != NULL) {
duke@435 3067 start_array->allocate_block(addr);
duke@435 3068 }
duke@435 3069 oop(addr)->update_contents(cm);
duke@435 3070 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
duke@435 3071 }
duke@435 3072 }
duke@435 3073 }
duke@435 3074
duke@435 3075 // Skip over count live words starting from beg, and return the address of the
duke@435 3076 // next live word. Unless marked, the word corresponding to beg is assumed to
duke@435 3077 // be dead. Callers must either ensure beg does not correspond to the middle of
duke@435 3078 // an object, or account for those live words in some other way. Callers must
duke@435 3079 // also ensure that there are enough live words in the range [beg, end) to skip.
duke@435 3080 HeapWord*
duke@435 3081 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
duke@435 3082 {
duke@435 3083 assert(count > 0, "sanity");
duke@435 3084
duke@435 3085 ParMarkBitMap* m = mark_bitmap();
duke@435 3086 idx_t bits_to_skip = m->words_to_bits(count);
duke@435 3087 idx_t cur_beg = m->addr_to_bit(beg);
duke@435 3088 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
duke@435 3089
duke@435 3090 do {
duke@435 3091 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 3092 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
duke@435 3093 const size_t obj_bits = cur_end - cur_beg + 1;
duke@435 3094 if (obj_bits > bits_to_skip) {
duke@435 3095 return m->bit_to_addr(cur_beg + bits_to_skip);
duke@435 3096 }
duke@435 3097 bits_to_skip -= obj_bits;
duke@435 3098 cur_beg = cur_end + 1;
duke@435 3099 } while (bits_to_skip > 0);
duke@435 3100
duke@435 3101 // Skipping the desired number of words landed just past the end of an object.
duke@435 3102 // Find the start of the next object.
duke@435 3103 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 3104 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
duke@435 3105 return m->bit_to_addr(cur_beg);
duke@435 3106 }
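// Worked example (hypothetical bitmap, assuming one bit per heap word):
// objects of 3 and 5 words start at bits 10 and 20, and count == 6. The
// loop consumes the 3-word object (bits_to_skip 6 -> 3), then finds
// 5 > 3 within the next object and returns bit_to_addr(20 + 3), i.e. an
// address 3 words into the second object.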
duke@435 3107
duke@435 3108 HeapWord*
duke@435 3109 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
duke@435 3110 size_t src_chunk_idx)
duke@435 3111 {
duke@435 3112 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3113 const ParallelCompactData& sd = summary_data();
duke@435 3114 const size_t ChunkSize = ParallelCompactData::ChunkSize;
duke@435 3115
duke@435 3116 assert(sd.is_chunk_aligned(dest_addr), "not aligned");
duke@435 3117
duke@435 3118 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
duke@435 3119 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
duke@435 3120 HeapWord* const src_chunk_destination = src_chunk_ptr->destination();
duke@435 3121
duke@435 3122 assert(dest_addr >= src_chunk_destination, "wrong src chunk");
duke@435 3123 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");
duke@435 3124
duke@435 3125 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
duke@435 3126 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;
duke@435 3127
duke@435 3128 HeapWord* addr = src_chunk_beg;
duke@435 3129 if (dest_addr == src_chunk_destination) {
duke@435 3130 // Return the first live word in the source chunk.
duke@435 3131 if (partial_obj_size == 0) {
duke@435 3132 addr = bitmap->find_obj_beg(addr, src_chunk_end);
duke@435 3133 assert(addr < src_chunk_end, "no objects start in src chunk");
duke@435 3134 }
duke@435 3135 return addr;
duke@435 3136 }
duke@435 3137
duke@435 3138 // Must skip some live data.
duke@435 3139 size_t words_to_skip = dest_addr - src_chunk_destination;
duke@435 3140 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");
duke@435 3141
duke@435 3142 if (partial_obj_size >= words_to_skip) {
duke@435 3143 // All the live words to skip are part of the partial object.
duke@435 3144 addr += words_to_skip;
duke@435 3145 if (partial_obj_size == words_to_skip) {
duke@435 3146 // Find the first live word past the partial object.
duke@435 3147 addr = bitmap->find_obj_beg(addr, src_chunk_end);
duke@435 3148 assert(addr < src_chunk_end, "wrong src chunk");
duke@435 3149 }
duke@435 3150 return addr;
duke@435 3151 }
duke@435 3152
duke@435 3153 // Skip over the partial object (if any).
duke@435 3154 if (partial_obj_size != 0) {
duke@435 3155 words_to_skip -= partial_obj_size;
duke@435 3156 addr += partial_obj_size;
duke@435 3157 }
duke@435 3158
duke@435 3159 // Skip over live words due to objects that start in the chunk.
duke@435 3160 addr = skip_live_words(addr, src_chunk_end, words_to_skip);
duke@435 3161 assert(addr < src_chunk_end, "wrong src chunk");
duke@435 3162 return addr;
duke@435 3163 }
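// Worked example (hypothetical values): if dest_addr is 10 words past the
// chunk's destination, words_to_skip == 10. With a 6-word partial object
// entering the chunk, 6 < 10, so the partial object is stepped over
// (addr += 6) and skip_live_words() advances over the remaining 4 live
// words belonging to objects that start in the chunk.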
duke@435 3164
duke@435 3165 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
duke@435 3166 size_t beg_chunk,
duke@435 3167 HeapWord* end_addr)
duke@435 3168 {
duke@435 3169 ParallelCompactData& sd = summary_data();
duke@435 3170 ChunkData* const beg = sd.chunk(beg_chunk);
duke@435 3171 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
duke@435 3172 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
duke@435 3173 size_t cur_idx = beg_chunk;
duke@435 3174 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
duke@435 3175 assert(cur->data_size() > 0, "chunk must have live data");
duke@435 3176 cur->decrement_destination_count();
duke@435 3177 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
duke@435 3178 cm->save_for_processing(cur_idx);
duke@435 3179 }
duke@435 3180 }
duke@435 3181 }
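// Note (as implied by the loop above): a source chunk's destination count
// tracks how many destination chunks still need its data. It is decremented
// as copying advances past the chunk; once the chunk becomes available (all
// of its live data has been copied out) and can be claimed, it is enqueued
// so a worker can fill it in turn.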
duke@435 3182
duke@435 3183 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
duke@435 3184 SpaceId& src_space_id,
duke@435 3185 HeapWord*& src_space_top,
duke@435 3186 HeapWord* end_addr)
duke@435 3187 {
duke@435 3188 typedef ParallelCompactData::ChunkData ChunkData;
duke@435 3189
duke@435 3190 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3191 const size_t chunk_size = ParallelCompactData::ChunkSize;
duke@435 3192
duke@435 3193 size_t src_chunk_idx = 0;
duke@435 3194
duke@435 3195 // Skip empty chunks (if any) up to the top of the space.
duke@435 3196 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
duke@435 3197 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
duke@435 3198 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
duke@435 3199 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 3200 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
duke@435 3201 ++src_chunk_ptr;
duke@435 3202 }
duke@435 3203
duke@435 3204 if (src_chunk_ptr < top_chunk_ptr) {
duke@435 3205 // The next source chunk is in the current space. Update src_chunk_idx and
duke@435 3206 // the source address to match src_chunk_ptr.
duke@435 3207 src_chunk_idx = sd.chunk(src_chunk_ptr);
duke@435 3208 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
duke@435 3209 if (src_chunk_addr > closure.source()) {
duke@435 3210 closure.set_source(src_chunk_addr);
duke@435 3211 }
duke@435 3212 return src_chunk_idx;
duke@435 3213 }
duke@435 3214
duke@435 3215 // Switch to a new source space and find the first non-empty chunk.
duke@435 3216 unsigned int space_id = src_space_id + 1;
duke@435 3217 assert(space_id < last_space_id, "not enough spaces");
duke@435 3218
duke@435 3219 HeapWord* const destination = closure.destination();
duke@435 3220
duke@435 3221 do {
duke@435 3222 MutableSpace* space = _space_info[space_id].space();
duke@435 3223 HeapWord* const bottom = space->bottom();
duke@435 3224 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);
duke@435 3225
duke@435 3226 // Iterate over the spaces that do not compact into themselves.
duke@435 3227 if (bottom_cp->destination() != bottom) {
duke@435 3228 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
duke@435 3229 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
duke@435 3230
duke@435 3231 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
duke@435 3232 if (src_cp->live_obj_size() > 0) {
duke@435 3233 // Found it.
duke@435 3234 assert(src_cp->destination() == destination,
duke@435 3235 "first live obj in the space must match the destination");
duke@435 3236 assert(src_cp->partial_obj_size() == 0,
duke@435 3237 "a space cannot begin with a partial obj");
duke@435 3238
duke@435 3239 src_space_id = SpaceId(space_id);
duke@435 3240 src_space_top = space->top();
duke@435 3241 const size_t src_chunk_idx = sd.chunk(src_cp);
duke@435 3242 closure.set_source(sd.chunk_to_addr(src_chunk_idx));
duke@435 3243 return src_chunk_idx;
duke@435 3244 } else {
duke@435 3245 assert(src_cp->data_size() == 0, "sanity");
duke@435 3246 }
duke@435 3247 }
duke@435 3248 }
duke@435 3249 } while (++space_id < last_space_id);
duke@435 3250
duke@435 3251 assert(false, "no source chunk was found");
duke@435 3252 return 0;
duke@435 3253 }
duke@435 3254
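// Overview of fill_chunk() below: the destination chunk may first receive
// the tail of a partial object (copy_partial_obj()); after that, live
// objects are iterated and moved one at a time. Each pass ends in one of
// three states: the object does not fit (would_overflow: record the
// deferred-update address and copy what fits), the closure filled the chunk
// (full: mark it completed), or the source chunk is exhausted (advance via
// next_src_chunk(), possibly switching source spaces).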
duke@435 3255 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
duke@435 3256 {
duke@435 3257 typedef ParMarkBitMap::IterationStatus IterationStatus;
duke@435 3258 const size_t ChunkSize = ParallelCompactData::ChunkSize;
duke@435 3259 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3260 ParallelCompactData& sd = summary_data();
duke@435 3261 ChunkData* const chunk_ptr = sd.chunk(chunk_idx);
duke@435 3262
duke@435 3263 // Get the items needed to construct the closure.
duke@435 3264 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
duke@435 3265 SpaceId dest_space_id = space_id(dest_addr);
duke@435 3266 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
duke@435 3267 HeapWord* new_top = _space_info[dest_space_id].new_top();
duke@435 3268 assert(dest_addr < new_top, "sanity");
duke@435 3269 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);
duke@435 3270
duke@435 3271 // Get the source chunk and related info.
duke@435 3272 size_t src_chunk_idx = chunk_ptr->source_chunk();
duke@435 3273 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
duke@435 3274 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
duke@435 3275
duke@435 3276 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 3277 closure.set_source(first_src_addr(dest_addr, src_chunk_idx));
duke@435 3278
duke@435 3279 // Adjust src_chunk_idx to prepare for decrementing destination counts (the
duke@435 3280 // destination count is not decremented when a chunk is copied to itself).
duke@435 3281 if (src_chunk_idx == chunk_idx) {
duke@435 3282 src_chunk_idx += 1;
duke@435 3283 }
duke@435 3284
duke@435 3285 if (bitmap->is_unmarked(closure.source())) {
duke@435 3286 // The first source word is in the middle of an object; copy the remainder
duke@435 3287 // of the object or as much as will fit. The fact that pointer updates were
duke@435 3288 // deferred will be noted when the object header is processed.
duke@435 3289 HeapWord* const old_src_addr = closure.source();
duke@435 3290 closure.copy_partial_obj();
duke@435 3291 if (closure.is_full()) {
duke@435 3292 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3293 chunk_ptr->set_deferred_obj_addr(NULL);
duke@435 3294 chunk_ptr->set_completed();
duke@435 3295 return;
duke@435 3296 }
duke@435 3297
duke@435 3298 HeapWord* const end_addr = sd.chunk_align_down(closure.source());
duke@435 3299 if (sd.chunk_align_down(old_src_addr) != end_addr) {
duke@435 3300 // The partial object was copied from more than one source chunk.
duke@435 3301 decrement_destination_counts(cm, src_chunk_idx, end_addr);
duke@435 3302
duke@435 3303 // Move to the next source chunk, possibly switching spaces as well. All
duke@435 3304 // args except end_addr may be modified.
duke@435 3305 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
duke@435 3306 end_addr);
duke@435 3307 }
duke@435 3308 }
duke@435 3309
duke@435 3310 do {
duke@435 3311 HeapWord* const cur_addr = closure.source();
duke@435 3312 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
duke@435 3313 src_space_top);
duke@435 3314 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
duke@435 3315
duke@435 3316 if (status == ParMarkBitMap::incomplete) {
duke@435 3317 // The last obj that starts in the source chunk does not end in the chunk.
duke@435 3318 assert(closure.source() < end_addr, "sanity");
duke@435 3319 HeapWord* const obj_beg = closure.source();
duke@435 3320 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
duke@435 3321 src_space_top);
duke@435 3322 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
duke@435 3323 if (obj_end < range_end) {
duke@435 3324 // The end was found; the entire object will fit.
duke@435 3325 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
duke@435 3326 assert(status != ParMarkBitMap::would_overflow, "sanity");
duke@435 3327 } else {
duke@435 3328 // The end was not found; the object will not fit.
duke@435 3329 assert(range_end < src_space_top, "obj cannot cross space boundary");
duke@435 3330 status = ParMarkBitMap::would_overflow;
duke@435 3331 }
duke@435 3332 }
duke@435 3333
duke@435 3334 if (status == ParMarkBitMap::would_overflow) {
duke@435 3335 // The last object did not fit. Note that interior oop updates were
duke@435 3336 // deferred, then copy enough of the object to fill the chunk.
duke@435 3337 chunk_ptr->set_deferred_obj_addr(closure.destination());
duke@435 3338 status = closure.copy_until_full(); // copies from closure.source()
duke@435 3339
duke@435 3340 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3341 chunk_ptr->set_completed();
duke@435 3342 return;
duke@435 3343 }
duke@435 3344
duke@435 3345 if (status == ParMarkBitMap::full) {
duke@435 3346 decrement_destination_counts(cm, src_chunk_idx, closure.source());
duke@435 3347 chunk_ptr->set_deferred_obj_addr(NULL);
duke@435 3348 chunk_ptr->set_completed();
duke@435 3349 return;
duke@435 3350 }
duke@435 3351
duke@435 3352 decrement_destination_counts(cm, src_chunk_idx, end_addr);
duke@435 3353
duke@435 3354 // Move to the next source chunk, possibly switching spaces as well. All
duke@435 3355 // args except end_addr may be modified.
duke@435 3356 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
duke@435 3357 end_addr);
duke@435 3358 } while (true);
duke@435 3359 }
duke@435 3360
duke@435 3361 void
duke@435 3362 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
duke@435 3363 const MutableSpace* sp = space(space_id);
duke@435 3364 if (sp->is_empty()) {
duke@435 3365 return;
duke@435 3366 }
duke@435 3367
duke@435 3368 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3369 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 3370 HeapWord* const dp_addr = dense_prefix(space_id);
duke@435 3371 HeapWord* beg_addr = sp->bottom();
duke@435 3372 HeapWord* end_addr = sp->top();
duke@435 3373
duke@435 3374 #ifdef ASSERT
duke@435 3375 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
duke@435 3376 if (cm->should_verify_only()) {
duke@435 3377 VerifyUpdateClosure verify_update(cm, sp);
duke@435 3378 bitmap->iterate(&verify_update, beg_addr, end_addr);
duke@435 3379 return;
duke@435 3380 }
duke@435 3381
duke@435 3382 if (cm->should_reset_only()) {
duke@435 3383 ResetObjectsClosure reset_objects(cm);
duke@435 3384 bitmap->iterate(&reset_objects, beg_addr, end_addr);
duke@435 3385 return;
duke@435 3386 }
duke@435 3387 #endif
duke@435 3388
duke@435 3389 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
duke@435 3390 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
duke@435 3391 if (beg_chunk < dp_chunk) {
duke@435 3392 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
duke@435 3393 }
duke@435 3394
duke@435 3395 // The destination of the first live object that starts in the chunk is one
duke@435 3396 // past the end of the partial object entering the chunk (if any).
duke@435 3397 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
duke@435 3398 HeapWord* const new_top = _space_info[space_id].new_top();
duke@435 3399 assert(new_top >= dest_addr, "bad new_top value");
duke@435 3400 const size_t words = pointer_delta(new_top, dest_addr);
duke@435 3401
duke@435 3402 if (words > 0) {
duke@435 3403 ObjectStartArray* start_array = _space_info[space_id].start_array();
duke@435 3404 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 3405
duke@435 3406 ParMarkBitMap::IterationStatus status;
duke@435 3407 status = bitmap->iterate(&closure, dest_addr, end_addr);
duke@435 3408 assert(status == ParMarkBitMap::full, "iteration not complete");
duke@435 3409 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
duke@435 3410 "live objects skipped because closure is full");
duke@435 3411 }
duke@435 3412 }
duke@435 3413
duke@435 3414 jlong PSParallelCompact::millis_since_last_gc() {
duke@435 3415 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
duke@435 3416 // XXX See note in genCollectedHeap::millis_since_last_gc().
duke@435 3417 if (ret_val < 0) {
duke@435 3418 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
duke@435 3419 return 0;
duke@435 3420 }
duke@435 3421 return ret_val;
duke@435 3422 }
duke@435 3423
duke@435 3424 void PSParallelCompact::reset_millis_since_last_gc() {
duke@435 3425 _time_of_last_gc = os::javaTimeMillis();
duke@435 3426 }
duke@435 3427
duke@435 3428 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
duke@435 3429 {
duke@435 3430 if (source() != destination()) {
duke@435 3431 assert(source() > destination(), "must copy to the left");
duke@435 3432 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
duke@435 3433 }
duke@435 3434 update_state(words_remaining());
duke@435 3435 assert(is_full(), "sanity");
duke@435 3436 return ParMarkBitMap::full;
duke@435 3437 }
duke@435 3438
duke@435 3439 void MoveAndUpdateClosure::copy_partial_obj()
duke@435 3440 {
duke@435 3441 size_t words = words_remaining();
duke@435 3442
duke@435 3443 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
duke@435 3444 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
duke@435 3445 if (end_addr < range_end) {
duke@435 3446 words = bitmap()->obj_size(source(), end_addr);
duke@435 3447 }
duke@435 3448
duke@435 3449 // This test is necessary; if omitted, the pointer updates to a partial object
duke@435 3450 // that crosses the dense prefix boundary could be overwritten.
duke@435 3451 if (source() != destination()) {
duke@435 3452 assert(source() > destination(), "must copy to the left");
duke@435 3453 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 3454 }
duke@435 3455 update_state(words);
duke@435 3456 }
duke@435 3457
duke@435 3458 ParMarkBitMapClosure::IterationStatus
duke@435 3459 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3460 assert(destination() != NULL, "sanity");
duke@435 3461 assert(bitmap()->obj_size(addr) == words, "bad size");
duke@435 3462
duke@435 3463 _source = addr;
duke@435 3464 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
duke@435 3465 destination(), "wrong destination");
duke@435 3466
duke@435 3467 if (words > words_remaining()) {
duke@435 3468 return ParMarkBitMap::would_overflow;
duke@435 3469 }
duke@435 3470
duke@435 3471 // The start_array must be updated even if the object is not moving.
duke@435 3472 if (_start_array != NULL) {
duke@435 3473 _start_array->allocate_block(destination());
duke@435 3474 }
duke@435 3475
duke@435 3476 if (destination() != source()) {
duke@435 3477 assert(destination() < source(), "must copy to the left");
duke@435 3478 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 3479 }
duke@435 3480
duke@435 3481 oop moved_oop = (oop) destination();
duke@435 3482 moved_oop->update_contents(compaction_manager());
duke@435 3483 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
duke@435 3484
duke@435 3485 update_state(words);
duke@435 3486 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
duke@435 3487 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
duke@435 3488 }
duke@435 3489
duke@435 3490 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 3491 ParCompactionManager* cm,
duke@435 3492 PSParallelCompact::SpaceId space_id) :
duke@435 3493 ParMarkBitMapClosure(mbm, cm),
duke@435 3494 _space_id(space_id),
duke@435 3495 _start_array(PSParallelCompact::start_array(space_id))
duke@435 3496 {
duke@435 3497 }
duke@435 3498
duke@435 3499 // Updates the references in the object to their new values.
duke@435 3500 ParMarkBitMapClosure::IterationStatus
duke@435 3501 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3502 do_addr(addr);
duke@435 3503 return ParMarkBitMap::incomplete;
duke@435 3504 }
duke@435 3505
duke@435 3506 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
duke@435 3507 ParCompactionManager* cm,
duke@435 3508 size_t chunk_index) :
duke@435 3509 ParMarkBitMapClosure(mbm, cm),
duke@435 3510 _live_data_left(0),
duke@435 3511 _cur_block(0) {
duke@435 3512 _chunk_start =
duke@435 3513 PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
duke@435 3514 _chunk_end =
duke@435 3515 PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
duke@435 3516 ParallelCompactData::ChunkSize;
duke@435 3517 _chunk_index = chunk_index;
duke@435 3518 _cur_block =
duke@435 3519 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
duke@435 3520 }
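// A sketch of the chunk arithmetic assumed above, using the constants
// defined at the top of this file: chunk_to_addr() maps a chunk index to
// the first word of that chunk, a chunk spans exactly ChunkSize words,
// and the round trip through addr_to_chunk_idx() is the identity.
//
//   ParallelCompactData& sd = PSParallelCompact::summary_data();
//   HeapWord* const start = sd.chunk_to_addr(chunk_index);
//   HeapWord* const end   = start + ParallelCompactData::ChunkSize;
//   assert(sd.addr_to_chunk_idx(start) == chunk_index, "round trip");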
duke@435 3521
duke@435 3522 bool BitBlockUpdateClosure::chunk_contains_cur_block() {
duke@435 3523 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
duke@435 3524 }
duke@435 3525
duke@435 3526 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
duke@435 3527 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
duke@435 3528 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3529 _chunk_index = chunk_index;
duke@435 3530 _live_data_left = 0;
duke@435 3531 _chunk_start = sd.chunk_to_addr(chunk_index);
duke@435 3532 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;
duke@435 3533
duke@435 3534 // The first block in this chunk
duke@435 3535 size_t first_block = sd.addr_to_block_idx(_chunk_start);
duke@435 3536 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();
duke@435 3537
duke@435 3538 // Set the offset to 0. By definition it should have that value
duke@435 3539 // but it may have been written while processing an earlier chunk.
duke@435 3540 if (partial_live_size == 0) {
duke@435 3541 // No live object extends onto the chunk. The first bit
duke@435 3542 // in the bit map for the first chunk must be a start bit.
duke@435 3543 // Although there may not be any marked bits, it is safe
duke@435 3544 // to set it as a start bit.
duke@435 3545 sd.block(first_block)->set_start_bit_offset(0);
duke@435 3546 sd.block(first_block)->set_first_is_start_bit(true);
duke@435 3547 } else if (sd.partial_obj_ends_in_block(first_block)) {
duke@435 3548 sd.block(first_block)->set_end_bit_offset(0);
duke@435 3549 sd.block(first_block)->set_first_is_start_bit(false);
duke@435 3550 } else {
duke@435 3551 // The partial object extends beyond the first block.
duke@435 3552 // There is no object starting in the first block
duke@435 3553 // so the offset and bit parity are not needed.
duke@435 3554 // Set the bit parity anyway so that assertions
duke@435 3555 // still work when no bit is found.
duke@435 3556 sd.block(first_block)->set_end_bit_offset(0);
duke@435 3557 sd.block(first_block)->set_first_is_start_bit(false);
duke@435 3558 }
duke@435 3559 _cur_block = first_block;
duke@435 3560 #ifdef ASSERT
duke@435 3561 if (sd.block(first_block)->first_is_start_bit()) {
duke@435 3562 assert(!sd.partial_obj_ends_in_block(first_block),
duke@435 3563 "Partial object cannot end in first block");
duke@435 3564 }
duke@435 3565
duke@435 3566 if (PrintGCDetails && Verbose) {
duke@435 3567 if (partial_live_size == 1) {
duke@435 3568 gclog_or_tty->print_cr("first_block " PTR_FORMAT
duke@435 3569 " _offset " PTR_FORMAT
duke@435 3570 " _first_is_start_bit %d",
duke@435 3571 first_block,
duke@435 3572 sd.block(first_block)->raw_offset(),
duke@435 3573 sd.block(first_block)->first_is_start_bit());
duke@435 3574 }
duke@435 3575 }
duke@435 3576 #endif
duke@435 3577 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);)
duke@435 3578 }
duke@435 3579
duke@435 3580 // This method is called when an object (both its beginning
duke@435 3581 // and its end) has been found in the range of iteration.  It
duke@435 3582 // calculates the words of live data to the left of a block.  That live
duke@435 3583 // data includes any object starting to the left of the block (i.e.,
duke@435 3584 // the live-data-to-the-left of block AAA will include the full size
duke@435 3585 // of any object entering AAA).
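//
// A worked example with hypothetical numbers: if a 200-word object
// starts 32 words before the first word of block AAA, then all 200
// words are counted in AAA's live-data-to-the-left, not just the 168
// words at or beyond AAA's first word.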
duke@435 3586
duke@435 3587 ParMarkBitMapClosure::IterationStatus
duke@435 3588 BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3589 // add the size to the block data.
duke@435 3590 HeapWord* obj = addr;
duke@435 3591 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 3592
duke@435 3593 assert(bitmap()->obj_size(obj) == words, "bad size");
duke@435 3594 assert(_chunk_start <= obj, "object is not in chunk");
duke@435 3595 assert(obj + words <= _chunk_end, "object is not in chunk");
duke@435 3596
duke@435 3597 // Update the live data to the left
duke@435 3598 size_t prev_live_data_left = _live_data_left;
duke@435 3599 _live_data_left = _live_data_left + words;
duke@435 3600
duke@435 3601 // Is this object in the current block?
duke@435 3602 size_t block_of_obj = sd.addr_to_block_idx(obj);
duke@435 3603 size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1);
duke@435 3604 HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last);
duke@435 3605 if (_cur_block < block_of_obj) {
duke@435 3606
duke@435 3607 //
duke@435 3608 // No object crossed the block boundary and this object was found
duke@435 3609 // on the other side of the block boundary. Update the offset for
duke@435 3610 // the new block with the data size that does not include this object.
duke@435 3611 //
duke@435 3612 // The first bit in block_of_obj is a start bit except in the
duke@435 3613 // case where the partial object for the chunk extends into
duke@435 3614 // this block.
duke@435 3615 if (sd.partial_obj_ends_in_block(block_of_obj)) {
duke@435 3616 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
duke@435 3617 } else {
duke@435 3618 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
duke@435 3619 }
duke@435 3620
duke@435 3621 // Does this object pass beyond its block?
duke@435 3622 if (block_of_obj < block_of_obj_last) {
duke@435 3623 // Object crosses block boundary. Two blocks need to be updated:
duke@435 3624 // the current block where the object started
duke@435 3625 // the block where the object ends
duke@435 3626 //
duke@435 3627 // The offset for blocks with no objects starting in them
duke@435 3628 // (e.g., blocks between _cur_block and block_of_obj_last)
duke@435 3629 // should not be needed.
duke@435 3630 // Note that block_of_obj_last may be in another chunk. If so,
duke@435 3631 // it should be overwritten later. This is a problem (writing
duke@435 3632 // into a block in a later chunk) for parallel execution.
duke@435 3633 assert(obj < block_of_obj_last_addr,
duke@435 3634 "Object should start in previous block");
duke@435 3635
duke@435 3636 // obj is crossing into block_of_obj_last so the first bit
duke@435 3637 // is an end bit.
duke@435 3638 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
duke@435 3639
duke@435 3640 _cur_block = block_of_obj_last;
duke@435 3641 } else {
duke@435 3642 // _first_is_start_bit has already been set correctly
duke@435 3643 // in the if-then-else above so don't reset it here.
duke@435 3644 _cur_block = block_of_obj;
duke@435 3645 }
duke@435 3646 } else {
duke@435 3647 // The current block only changes if the object extends beyond
duke@435 3648 // the block it starts in.
duke@435 3649 //
duke@435 3650 // The object starts in the current block.
duke@435 3651 // Does this object pass beyond the end of it?
duke@435 3652 if (block_of_obj < block_of_obj_last) {
duke@435 3653 // Object crosses block boundary.
duke@435 3654 // See note above on possible blocks between block_of_obj and
duke@435 3655 // block_of_obj_last
duke@435 3656 assert(obj < block_of_obj_last_addr,
duke@435 3657 "Object should start in previous block");
duke@435 3658
duke@435 3659 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
duke@435 3660
duke@435 3661 _cur_block = block_of_obj_last;
duke@435 3662 }
duke@435 3663 }
duke@435 3664
duke@435 3665 // Return incomplete if there are more blocks to be done.
duke@435 3666 if (chunk_contains_cur_block()) {
duke@435 3667 return ParMarkBitMap::incomplete;
duke@435 3668 }
duke@435 3669 return ParMarkBitMap::complete;
duke@435 3670 }
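// A worked trace of do_addr() with hypothetical numbers (BlockSize ==
// 128 words): suppose _cur_block is 10 and a 200-word object starts 32
// words into block 11.  Then block_of_obj == 11 and block_of_obj_last
// == 12 (the object occupies the first 104 words of block 12).  Since
// _cur_block < block_of_obj, block 11 records an offset of
// prev_live_data_left; since the object crosses into block 12, that
// block records an end-bit offset of _live_data_left, which includes
// the object itself; finally _cur_block becomes 12.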
duke@435 3671
duke@435 3672 // Verify the new location using the forwarding pointer
duke@435 3673 // from MarkSweep::mark_sweep_phase2().  The mark word is
duke@435 3674 // reset to its initial value later, by ResetObjectsClosure.
duke@435 3675 ParMarkBitMapClosure::IterationStatus
duke@435 3676 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3677 // The second arg (words) is not used.
duke@435 3678 oop obj = (oop) addr;
duke@435 3679 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
duke@435 3680 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
duke@435 3681 if (forwarding_ptr == NULL) {
duke@435 3682 // The object is dead or not moving.
duke@435 3683 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
duke@435 3684 "Object liveness is wrong.");
duke@435 3685 return ParMarkBitMap::incomplete;
duke@435 3686 }
duke@435 3687 assert(UseParallelOldGCDensePrefix ||
duke@435 3688 (HeapMaximumCompactionInterval > 1) ||
duke@435 3689 (MarkSweepAlwaysCompactCount > 1) ||
duke@435 3690 (forwarding_ptr == new_pointer),
duke@435 3691 "Calculation of new location is incorrect");
duke@435 3692 return ParMarkBitMap::incomplete;
duke@435 3693 }
duke@435 3694
duke@435 3695 // Reset objects modified for debug checking.
duke@435 3696 ParMarkBitMapClosure::IterationStatus
duke@435 3697 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 3698 // The second arg (words) is not used.
duke@435 3699 oop obj = (oop) addr;
duke@435 3700 obj->init_mark();
duke@435 3701 return ParMarkBitMap::incomplete;
duke@435 3702 }
duke@435 3703
duke@435 3704 // Prepare for compaction. This method is executed once
duke@435 3705 // (i.e., by a single thread) before compaction.
duke@435 3706 // Save the updated location of the intArrayKlassObj for
duke@435 3707 // filling holes in the dense prefix.
duke@435 3708 void PSParallelCompact::compact_prologue() {
duke@435 3709 _updated_int_array_klass_obj = (klassOop)
duke@435 3710 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
duke@435 3711 }
duke@435 3712
duke@435 3713 // The initial implementation of this method created a field
duke@435 3714 // _next_compaction_space_id in SpaceInfo and initialized
duke@435 3715 // that field in SpaceInfo::initialize_space_info().  That
duke@435 3716 // required _next_compaction_space_id to be declared as a
duke@435 3717 // SpaceId in SpaceInfo, which in turn would have required
duke@435 3718 // SpaceId to be declared either in a separate class or in
duke@435 3719 // SpaceInfo itself.  Declaring it in SpaceInfo didn't seem
duke@435 3720 // consistent (it doesn't really fit there logically), and
duke@435 3721 // defining a separate class just to hold SpaceId seemed
duke@435 3722 // excessive.  This implementation is simple and localizes
duke@435 3723 // the knowledge.
duke@435 3724
duke@435 3725 PSParallelCompact::SpaceId
duke@435 3726 PSParallelCompact::next_compaction_space_id(SpaceId id) {
duke@435 3727 assert(id < last_space_id, "id out of range");
duke@435 3728 switch (id) {
duke@435 3729 case perm_space_id :
duke@435 3730 return last_space_id;
duke@435 3731 case old_space_id :
duke@435 3732 return eden_space_id;
duke@435 3733 case eden_space_id :
duke@435 3734 return from_space_id;
duke@435 3735 case from_space_id :
duke@435 3736 return to_space_id;
duke@435 3737 case to_space_id :
duke@435 3738 return last_space_id;
duke@435 3739 default:
duke@435 3740 assert(false, "Bad space id");
duke@435 3741 return last_space_id;
duke@435 3742 }
duke@435 3743 }
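// A minimal usage sketch (illustrative, not from this file): the method
// above defines the compaction traversal order old -> eden -> from ->
// to, with perm_space_id and to_space_id both mapping to last_space_id.
// A caller might walk the spaces as:
//
//   for (PSParallelCompact::SpaceId id = PSParallelCompact::old_space_id;
//        id != PSParallelCompact::last_space_id;
//        id = PSParallelCompact::next_compaction_space_id(id)) {
//     // process space id
//   }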
duke@435 3744
duke@435 3745 // Here temporarily for debugging
duke@435 3746 #ifdef ASSERT
duke@435 3747 size_t ParallelCompactData::block_idx(BlockData* block) {
duke@435 3748 size_t index = pointer_delta(block,
duke@435 3749 PSParallelCompact::summary_data()._block_data, sizeof(BlockData));
duke@435 3750 return index;
duke@435 3751 }
duke@435 3752 #endif
