src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

author       ysr
date         Thu, 20 Nov 2008 16:56:09 -0800
changeset    888:c96030fff130
parent       811:0166ac265d53
child        892:27a80744a83b
permissions  -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
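
Illustrative sketch (not part of the changeset): under an LRU-style clearing policy, a soft reference survives if its age (the soft-ref clock minus its timestamp) is within an interval scaled by the free heap, so that test can already be evaluated while marking; only references that fail it need to be revisited during reference processing. The type and function names below (SoftRefAgePolicy, will_survive_marking) are invented for the example and are not the HotSpot ReferenceProcessor API.

  // Illustrative only -- models an LRU soft-reference clearing policy check
  // that a marker can apply up front; not the actual HotSpot implementation.
  #include <cstdint>

  struct SoftRefAgePolicy {
    int64_t clock_ms;        // soft-ref clock sampled at the start of marking
    int64_t ms_per_mb;       // tuning knob, in the spirit of SoftRefLRUPolicyMSPerMB
    int64_t free_heap_mb;    // free-heap estimate the policy scales by

    // True if the reference is recent enough that the current policy would
    // never clear it; the marker may then treat the referent as strongly
    // reachable instead of deferring it to reference processing.
    bool will_survive_marking(int64_t ref_timestamp_ms) const {
      const int64_t age_ms = clock_ms - ref_timestamp_ms;
      return age_ms <= ms_per_mb * free_heap_mb;
    }
  };

References that fail this test are only candidates for clearing; the final clear-or-keep decision is still made in the reference-processing phase, which is what keeps the shortcut safe when marking and reference processing see different heap states.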

duke@435 1 /*
xdono@631 2 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_psParallelCompact.cpp.incl"
duke@435 27
duke@435 28 #include <math.h>
duke@435 29
duke@435 30 // All sizes are in HeapWords.
jcoomes@810 31 const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
jcoomes@810 32 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
jcoomes@810 33 const size_t ParallelCompactData::RegionSizeBytes =
jcoomes@810 34 RegionSize << LogHeapWordSize;
jcoomes@810 35 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
jcoomes@810 36 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
jcoomes@810 37 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
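// Worked example (assuming a 64-bit VM, i.e. LogHeapWordSize == 3): with
// Log2RegionSize == 9, RegionSize is 512 words and RegionSizeBytes is 4096
// bytes, so RegionSizeOffsetMask == 0x1ff, RegionAddrOffsetMask == 0xfff and
// RegionAddrMask == ~0xfff; a word offset maps to its region index by a
// right shift of 9, and an address is region-aligned when its low 12 bits
// are zero.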
duke@435 38
jcoomes@810 39 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 40 ParallelCompactData::RegionData::dc_shift = 27;
jcoomes@810 41
jcoomes@810 42 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 43 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
jcoomes@810 44
jcoomes@810 45 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 46 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
jcoomes@810 47
jcoomes@810 48 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 49 ParallelCompactData::RegionData::los_mask = ~dc_mask;
jcoomes@810 50
jcoomes@810 51 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 52 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
jcoomes@810 53
jcoomes@810 54 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 55 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
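// Bit-layout example (assuming region_sz_t is a 32-bit unsigned type): the
// destination count and claim state live in the bits at and above dc_shift
// (bit 27), so dc_mask == 0xf8000000, dc_one == 0x08000000,
// dc_claimed == 0x40000000 and dc_completed == 0x60000000, while
// los_mask == 0x07ffffff selects the live-obj-size field in the low 27 bits.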
duke@435 56
duke@435 57 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
duke@435 58 bool PSParallelCompact::_print_phases = false;
duke@435 59
duke@435 60 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
duke@435 61 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
duke@435 62
duke@435 63 double PSParallelCompact::_dwl_mean;
duke@435 64 double PSParallelCompact::_dwl_std_dev;
duke@435 65 double PSParallelCompact::_dwl_first_term;
duke@435 66 double PSParallelCompact::_dwl_adjustment;
duke@435 67 #ifdef ASSERT
duke@435 68 bool PSParallelCompact::_dwl_initialized = false;
duke@435 69 #endif // #ifdef ASSERT
duke@435 70
duke@435 71 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 72 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
duke@435 73 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
duke@435 74 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
duke@435 75 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
duke@435 76 size_t PSParallelCompact::_live_oops_index = 0;
duke@435 77 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
coleenp@548 78 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
coleenp@548 79 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
duke@435 80 bool PSParallelCompact::_pointer_tracking = false;
duke@435 81 bool PSParallelCompact::_root_tracking = true;
duke@435 82
duke@435 83 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
duke@435 84 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
duke@435 85 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
duke@435 86 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
duke@435 87 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
duke@435 88 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
duke@435 89 #endif
duke@435 90
duke@435 91 #ifndef PRODUCT
duke@435 92 const char* PSParallelCompact::space_names[] = {
duke@435 93 "perm", "old ", "eden", "from", "to "
duke@435 94 };
duke@435 95
jcoomes@810 96 void PSParallelCompact::print_region_ranges()
duke@435 97 {
duke@435 98 tty->print_cr("space bottom top end new_top");
duke@435 99 tty->print_cr("------ ---------- ---------- ---------- ----------");
duke@435 100
duke@435 101 for (unsigned int id = 0; id < last_space_id; ++id) {
duke@435 102 const MutableSpace* space = _space_info[id].space();
duke@435 103 tty->print_cr("%u %s "
jcoomes@699 104 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
jcoomes@699 105 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
duke@435 106 id, space_names[id],
jcoomes@810 107 summary_data().addr_to_region_idx(space->bottom()),
jcoomes@810 108 summary_data().addr_to_region_idx(space->top()),
jcoomes@810 109 summary_data().addr_to_region_idx(space->end()),
jcoomes@810 110 summary_data().addr_to_region_idx(_space_info[id].new_top()));
duke@435 111 }
duke@435 112 }
duke@435 113
duke@435 114 void
jcoomes@810 115 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
duke@435 116 {
jcoomes@810 117 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
jcoomes@810 118 #define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
duke@435 119
duke@435 120 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 121 size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
jcoomes@810 122 tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
jcoomes@810 123 REGION_IDX_FORMAT " " PTR_FORMAT " "
jcoomes@810 124 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
jcoomes@810 125 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
duke@435 126 i, c->data_location(), dci, c->destination(),
duke@435 127 c->partial_obj_size(), c->live_obj_size(),
jcoomes@810 128 c->data_size(), c->source_region(), c->destination_count());
jcoomes@810 129
jcoomes@810 130 #undef REGION_IDX_FORMAT
jcoomes@810 131 #undef REGION_DATA_FORMAT
duke@435 132 }
duke@435 133
duke@435 134 void
duke@435 135 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 136 HeapWord* const beg_addr,
duke@435 137 HeapWord* const end_addr)
duke@435 138 {
duke@435 139 size_t total_words = 0;
jcoomes@810 140 size_t i = summary_data.addr_to_region_idx(beg_addr);
jcoomes@810 141 const size_t last = summary_data.addr_to_region_idx(end_addr);
duke@435 142 HeapWord* pdest = 0;
duke@435 143
duke@435 144 while (i <= last) {
jcoomes@810 145 ParallelCompactData::RegionData* c = summary_data.region(i);
duke@435 146 if (c->data_size() != 0 || c->destination() != pdest) {
jcoomes@810 147 print_generic_summary_region(i, c);
duke@435 148 total_words += c->data_size();
duke@435 149 pdest = c->destination();
duke@435 150 }
duke@435 151 ++i;
duke@435 152 }
duke@435 153
duke@435 154 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
duke@435 155 }
duke@435 156
duke@435 157 void
duke@435 158 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 159 SpaceInfo* space_info)
duke@435 160 {
duke@435 161 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
duke@435 162 const MutableSpace* space = space_info[id].space();
duke@435 163 print_generic_summary_data(summary_data, space->bottom(),
duke@435 164 MAX2(space->top(), space_info[id].new_top()));
duke@435 165 }
duke@435 166 }
duke@435 167
duke@435 168 void
jcoomes@810 169 print_initial_summary_region(size_t i,
jcoomes@810 170 const ParallelCompactData::RegionData* c,
jcoomes@810 171 bool newline = true)
duke@435 172 {
jcoomes@699 173 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
jcoomes@699 174 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
jcoomes@699 175 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
duke@435 176 i, c->destination(),
duke@435 177 c->partial_obj_size(), c->live_obj_size(),
jcoomes@810 178 c->data_size(), c->source_region(), c->destination_count());
duke@435 179 if (newline) tty->cr();
duke@435 180 }
duke@435 181
duke@435 182 void
duke@435 183 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 184 const MutableSpace* space) {
duke@435 185 if (space->top() == space->bottom()) {
duke@435 186 return;
duke@435 187 }
duke@435 188
jcoomes@810 189 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@810 190 typedef ParallelCompactData::RegionData RegionData;
jcoomes@810 191 HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
jcoomes@810 192 const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
jcoomes@810 193 const RegionData* c = summary_data.region(end_region - 1);
duke@435 194 HeapWord* end_addr = c->destination() + c->data_size();
duke@435 195 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
duke@435 196
jcoomes@810 197 // Print (and count) the full regions at the beginning of the space.
jcoomes@810 198 size_t full_region_count = 0;
jcoomes@810 199 size_t i = summary_data.addr_to_region_idx(space->bottom());
jcoomes@810 200 while (i < end_region && summary_data.region(i)->data_size() == region_size) {
jcoomes@810 201 print_initial_summary_region(i, summary_data.region(i));
jcoomes@810 202 ++full_region_count;
duke@435 203 ++i;
duke@435 204 }
duke@435 205
jcoomes@810 206 size_t live_to_right = live_in_space - full_region_count * region_size;
duke@435 207
duke@435 208 double max_reclaimed_ratio = 0.0;
jcoomes@810 209 size_t max_reclaimed_ratio_region = 0;
duke@435 210 size_t max_dead_to_right = 0;
duke@435 211 size_t max_live_to_right = 0;
duke@435 212
jcoomes@810 213 // Print the 'reclaimed ratio' for regions while there is something live in
jcoomes@810 214 // the region or to the right of it. The remaining regions are empty (and
duke@435 215 // uninteresting), and computing the ratio will result in division by 0.
jcoomes@810 216 while (i < end_region && live_to_right > 0) {
jcoomes@810 217 c = summary_data.region(i);
jcoomes@810 218 HeapWord* const region_addr = summary_data.region_to_addr(i);
jcoomes@810 219 const size_t used_to_right = pointer_delta(space->top(), region_addr);
duke@435 220 const size_t dead_to_right = used_to_right - live_to_right;
duke@435 221 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
duke@435 222
duke@435 223 if (reclaimed_ratio > max_reclaimed_ratio) {
duke@435 224 max_reclaimed_ratio = reclaimed_ratio;
jcoomes@810 225 max_reclaimed_ratio_region = i;
duke@435 226 max_dead_to_right = dead_to_right;
duke@435 227 max_live_to_right = live_to_right;
duke@435 228 }
duke@435 229
jcoomes@810 230 print_initial_summary_region(i, c, false);
jcoomes@699 231 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
duke@435 232 reclaimed_ratio, dead_to_right, live_to_right);
duke@435 233
duke@435 234 live_to_right -= c->data_size();
duke@435 235 ++i;
duke@435 236 }
duke@435 237
jcoomes@810 238 // Any remaining regions are empty. Print one more if there is one.
jcoomes@810 239 if (i < end_region) {
jcoomes@810 240 print_initial_summary_region(i, summary_data.region(i));
duke@435 241 }
duke@435 242
jcoomes@699 243 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
jcoomes@699 244 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
jcoomes@810 245 max_reclaimed_ratio_region, max_dead_to_right,
duke@435 246 max_live_to_right, max_reclaimed_ratio);
duke@435 247 }
duke@435 248
duke@435 249 void
duke@435 250 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 251 SpaceInfo* space_info) {
duke@435 252 unsigned int id = PSParallelCompact::perm_space_id;
duke@435 253 const MutableSpace* space;
duke@435 254 do {
duke@435 255 space = space_info[id].space();
duke@435 256 print_initial_summary_data(summary_data, space);
duke@435 257 } while (++id < PSParallelCompact::eden_space_id);
duke@435 258
duke@435 259 do {
duke@435 260 space = space_info[id].space();
duke@435 261 print_generic_summary_data(summary_data, space->bottom(), space->top());
duke@435 262 } while (++id < PSParallelCompact::last_space_id);
duke@435 263 }
duke@435 264 #endif // #ifndef PRODUCT
duke@435 265
duke@435 266 #ifdef ASSERT
duke@435 267 size_t add_obj_count;
duke@435 268 size_t add_obj_size;
duke@435 269 size_t mark_bitmap_count;
duke@435 270 size_t mark_bitmap_size;
duke@435 271 #endif // #ifdef ASSERT
duke@435 272
duke@435 273 ParallelCompactData::ParallelCompactData()
duke@435 274 {
duke@435 275 _region_start = 0;
duke@435 276
jcoomes@810 277 _region_vspace = 0;
jcoomes@810 278 _region_data = 0;
jcoomes@810 279 _region_count = 0;
duke@435 280 }
duke@435 281
duke@435 282 bool ParallelCompactData::initialize(MemRegion covered_region)
duke@435 283 {
duke@435 284 _region_start = covered_region.start();
duke@435 285 const size_t region_size = covered_region.word_size();
duke@435 286 DEBUG_ONLY(_region_end = _region_start + region_size;)
duke@435 287
jcoomes@810 288 assert(region_align_down(_region_start) == _region_start,
duke@435 289 "region start not aligned");
jcoomes@810 290 assert((region_size & RegionSizeOffsetMask) == 0,
jcoomes@810 291 "region size not a multiple of RegionSize");
jcoomes@810 292
jcoomes@810 293 bool result = initialize_region_data(region_size);
duke@435 294
duke@435 295 return result;
duke@435 296 }
duke@435 297
duke@435 298 PSVirtualSpace*
duke@435 299 ParallelCompactData::create_vspace(size_t count, size_t element_size)
duke@435 300 {
duke@435 301 const size_t raw_bytes = count * element_size;
duke@435 302 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
duke@435 303 const size_t granularity = os::vm_allocation_granularity();
duke@435 304 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
duke@435 305
duke@435 306 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
duke@435 307 MAX2(page_sz, granularity);
jcoomes@514 308 ReservedSpace rs(bytes, rs_align, rs_align > 0);
duke@435 309 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
duke@435 310 rs.size());
duke@435 311 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
duke@435 312 if (vspace != 0) {
duke@435 313 if (vspace->expand_by(bytes)) {
duke@435 314 return vspace;
duke@435 315 }
duke@435 316 delete vspace;
coleenp@672 317 // Release memory reserved in the space.
coleenp@672 318 rs.release();
duke@435 319 }
duke@435 320
duke@435 321 return 0;
duke@435 322 }
duke@435 323
jcoomes@810 324 bool ParallelCompactData::initialize_region_data(size_t region_size)
duke@435 325 {
jcoomes@810 326 const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
jcoomes@810 327 _region_vspace = create_vspace(count, sizeof(RegionData));
jcoomes@810 328 if (_region_vspace != 0) {
jcoomes@810 329 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
jcoomes@810 330 _region_count = count;
duke@435 331 return true;
duke@435 332 }
duke@435 333 return false;
duke@435 334 }
duke@435 335
duke@435 336 void ParallelCompactData::clear()
duke@435 337 {
jcoomes@810 338 memset(_region_data, 0, _region_vspace->committed_size());
duke@435 339 }
duke@435 340
jcoomes@810 341 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
jcoomes@810 342 assert(beg_region <= _region_count, "beg_region out of range");
jcoomes@810 343 assert(end_region <= _region_count, "end_region out of range");
jcoomes@810 344
jcoomes@810 345 const size_t region_cnt = end_region - beg_region;
jcoomes@810 346 memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
duke@435 347 }
duke@435 348
jcoomes@810 349 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
duke@435 350 {
jcoomes@810 351 const RegionData* cur_cp = region(region_idx);
jcoomes@810 352 const RegionData* const end_cp = region(region_count() - 1);
jcoomes@810 353
jcoomes@810 354 HeapWord* result = region_to_addr(region_idx);
duke@435 355 if (cur_cp < end_cp) {
duke@435 356 do {
duke@435 357 result += cur_cp->partial_obj_size();
jcoomes@810 358 } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
duke@435 359 }
duke@435 360 return result;
duke@435 361 }
duke@435 362
duke@435 363 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
duke@435 364 {
duke@435 365 const size_t obj_ofs = pointer_delta(addr, _region_start);
jcoomes@810 366 const size_t beg_region = obj_ofs >> Log2RegionSize;
jcoomes@810 367 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
duke@435 368
duke@435 369 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
duke@435 370 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
duke@435 371
jcoomes@810 372 if (beg_region == end_region) {
jcoomes@810 373 // All in one region.
jcoomes@810 374 _region_data[beg_region].add_live_obj(len);
duke@435 375 return;
duke@435 376 }
duke@435 377
jcoomes@810 378 // First region.
jcoomes@810 379 const size_t beg_ofs = region_offset(addr);
jcoomes@810 380 _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
duke@435 381
duke@435 382 klassOop klass = ((oop)addr)->klass();
jcoomes@810 383 // Middle regions--completely spanned by this object.
jcoomes@810 384 for (size_t region = beg_region + 1; region < end_region; ++region) {
jcoomes@810 385 _region_data[region].set_partial_obj_size(RegionSize);
jcoomes@810 386 _region_data[region].set_partial_obj_addr(addr);
duke@435 387 }
duke@435 388
jcoomes@810 389 // Last region.
jcoomes@810 390 const size_t end_ofs = region_offset(addr + len - 1);
jcoomes@810 391 _region_data[end_region].set_partial_obj_size(end_ofs + 1);
jcoomes@810 392 _region_data[end_region].set_partial_obj_addr(addr);
duke@435 393 }
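// Worked example (illustrative numbers, RegionSize == 512): an object of
// len == 1200 words starting 100 words into region R contributes
// 512 - 100 == 412 live words to R, fully spans region R+1
// (partial_obj_size == 512), and its last word falls at offset 275 of
// region R+2, which therefore gets a partial_obj_size of 276;
// 412 + 512 + 276 == 1200.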
duke@435 394
duke@435 395 void
duke@435 396 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
duke@435 397 {
jcoomes@810 398 assert(region_offset(beg) == 0, "not RegionSize aligned");
jcoomes@810 399 assert(region_offset(end) == 0, "not RegionSize aligned");
jcoomes@810 400
jcoomes@810 401 size_t cur_region = addr_to_region_idx(beg);
jcoomes@810 402 const size_t end_region = addr_to_region_idx(end);
duke@435 403 HeapWord* addr = beg;
jcoomes@810 404 while (cur_region < end_region) {
jcoomes@810 405 _region_data[cur_region].set_destination(addr);
jcoomes@810 406 _region_data[cur_region].set_destination_count(0);
jcoomes@810 407 _region_data[cur_region].set_source_region(cur_region);
jcoomes@810 408 _region_data[cur_region].set_data_location(addr);
jcoomes@810 409
jcoomes@810 410 // Update live_obj_size so the region appears completely full.
jcoomes@810 411 size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
jcoomes@810 412 _region_data[cur_region].set_live_obj_size(live_size);
jcoomes@810 413
jcoomes@810 414 ++cur_region;
jcoomes@810 415 addr += RegionSize;
duke@435 416 }
duke@435 417 }
duke@435 418
duke@435 419 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
duke@435 420 HeapWord* source_beg, HeapWord* source_end,
duke@435 421 HeapWord** target_next,
duke@435 422 HeapWord** source_next) {
duke@435 423 // This is too strict.
jcoomes@810 424 // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
duke@435 425
duke@435 426 if (TraceParallelOldGCSummaryPhase) {
duke@435 427 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
duke@435 428 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
duke@435 429 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
duke@435 430 target_beg, target_end,
duke@435 431 source_beg, source_end,
duke@435 432 target_next != 0 ? *target_next : (HeapWord*) 0,
duke@435 433 source_next != 0 ? *source_next : (HeapWord*) 0);
duke@435 434 }
duke@435 435
jcoomes@810 436 size_t cur_region = addr_to_region_idx(source_beg);
jcoomes@810 437 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
duke@435 438
duke@435 439 HeapWord *dest_addr = target_beg;
jcoomes@810 440 while (cur_region < end_region) {
jcoomes@810 441 size_t words = _region_data[cur_region].data_size();
duke@435 442
duke@435 443 #if 1
duke@435 444 assert(pointer_delta(target_end, dest_addr) >= words,
duke@435 445 "source region does not fit into target region");
duke@435 446 #else
jcoomes@810 447 // XXX - need some work on the corner cases here. If the region does not
jcoomes@810 448 // fit, then must either make sure any partial_obj from the region fits, or
jcoomes@810 449 // "undo" the initial part of the partial_obj that is in the previous
jcoomes@810 450 // region.
duke@435 451 if (dest_addr + words >= target_end) {
duke@435 452 // Let the caller know where to continue.
duke@435 453 *target_next = dest_addr;
jcoomes@810 454 *source_next = region_to_addr(cur_region);
duke@435 455 return false;
duke@435 456 }
duke@435 457 #endif // #if 1
duke@435 458
jcoomes@810 459 _region_data[cur_region].set_destination(dest_addr);
jcoomes@810 460
jcoomes@810 461 // Set the destination_count for cur_region, and if necessary, update
jcoomes@810 462 // source_region for a destination region. The source_region field is
jcoomes@810 463 // updated if cur_region is the first (left-most) region to be copied to a
jcoomes@810 464 // destination region.
duke@435 465 //
jcoomes@810 466 // The destination_count calculation is a bit subtle. A region that has
jcoomes@810 467 // data that compacts into itself does not count itself as a destination.
jcoomes@810 468 // This maintains the invariant that a zero count means the region is
jcoomes@810 469 // available and can be claimed and then filled.
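// Concrete example (illustrative): if all of cur_region's data lands inside
// a single destination region, destination_count is 1, or 0 when that
// destination is cur_region itself (data compacting into itself). If the
// data straddles a destination-region boundary, the count is 2, or 1 when
// the second destination region is cur_region (partially compacting into
// itself).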
duke@435 470 if (words > 0) {
duke@435 471 HeapWord* const last_addr = dest_addr + words - 1;
jcoomes@810 472 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
jcoomes@810 473 const size_t dest_region_2 = addr_to_region_idx(last_addr);
duke@435 474 #if 0
jcoomes@810 475 // Initially assume that the destination regions will be the same and
duke@435 476 // adjust the value below if necessary. Under this assumption, if
jcoomes@810 477 // cur_region == dest_region_2, then cur_region will be compacted
jcoomes@810 478 // completely into itself.
jcoomes@810 479 uint destination_count = cur_region == dest_region_2 ? 0 : 1;
jcoomes@810 480 if (dest_region_1 != dest_region_2) {
jcoomes@810 481 // Destination regions differ; adjust destination_count.
duke@435 482 destination_count += 1;
jcoomes@810 483 // Data from cur_region will be copied to the start of dest_region_2.
jcoomes@810 484 _region_data[dest_region_2].set_source_region(cur_region);
jcoomes@810 485 } else if (region_offset(dest_addr) == 0) {
jcoomes@810 486 // Data from cur_region will be copied to the start of the destination
jcoomes@810 487 // region.
jcoomes@810 488 _region_data[dest_region_1].set_source_region(cur_region);
duke@435 489 }
duke@435 490 #else
jcoomes@810 491 // Initially assume that the destination regions will be different and
duke@435 492 // adjust the value below if necessary. Under this assumption, if
jcoomes@810 493 // cur_region == dest_region_2, then cur_region will be compacted partially
jcoomes@810 494 // into dest_region_1 and partially into itself.
jcoomes@810 495 uint destination_count = cur_region == dest_region_2 ? 1 : 2;
jcoomes@810 496 if (dest_region_1 != dest_region_2) {
jcoomes@810 497 // Data from cur_region will be copied to the start of dest_region_2.
jcoomes@810 498 _region_data[dest_region_2].set_source_region(cur_region);
duke@435 499 } else {
jcoomes@810 500 // Destination regions are the same; adjust destination_count.
duke@435 501 destination_count -= 1;
jcoomes@810 502 if (region_offset(dest_addr) == 0) {
jcoomes@810 503 // Data from cur_region will be copied to the start of the destination
jcoomes@810 504 // region.
jcoomes@810 505 _region_data[dest_region_1].set_source_region(cur_region);
duke@435 506 }
duke@435 507 }
duke@435 508 #endif // #if 0
duke@435 509
jcoomes@810 510 _region_data[cur_region].set_destination_count(destination_count);
jcoomes@810 511 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
duke@435 512 dest_addr += words;
duke@435 513 }
duke@435 514
jcoomes@810 515 ++cur_region;
duke@435 516 }
duke@435 517
duke@435 518 *target_next = dest_addr;
duke@435 519 return true;
duke@435 520 }
duke@435 521
duke@435 522 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
duke@435 523 assert(addr != NULL, "Should detect NULL oop earlier");
duke@435 524 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
duke@435 525 #ifdef ASSERT
duke@435 526 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
duke@435 527 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
duke@435 528 }
duke@435 529 #endif
duke@435 530 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
duke@435 531
jcoomes@810 532 // Region covering the object.
jcoomes@810 533 size_t region_index = addr_to_region_idx(addr);
jcoomes@810 534 const RegionData* const region_ptr = region(region_index);
jcoomes@810 535 HeapWord* const region_addr = region_align_down(addr);
jcoomes@810 536
jcoomes@810 537 assert(addr < region_addr + RegionSize, "Region does not cover object");
jcoomes@810 538 assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
jcoomes@810 539
jcoomes@810 540 HeapWord* result = region_ptr->destination();
jcoomes@810 541
jcoomes@810 542 // If all the data in the region is live, then the new location of the object
jcoomes@810 543 // can be calculated from the destination of the region plus the offset of the
jcoomes@810 544 // object in the region.
jcoomes@810 545 if (region_ptr->data_size() == RegionSize) {
jcoomes@810 546 result += pointer_delta(addr, region_addr);
duke@435 547 return result;
duke@435 548 }
duke@435 549
duke@435 550 // The new location of the object is
jcoomes@810 551 // region destination +
jcoomes@810 552 // size of the partial object extending onto the region +
jcoomes@810 553 // sizes of the live objects in the Region that are to the left of addr
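// Worked example (illustrative numbers): if the region's destination is D,
// a partial object spilling over from the previous region occupies the first
// 40 words of this region, and the live words between that partial object
// and addr total 100, the object at addr is relocated to D + 40 + 100.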
jcoomes@810 554 const size_t partial_obj_size = region_ptr->partial_obj_size();
jcoomes@810 555 HeapWord* const search_start = region_addr + partial_obj_size;
duke@435 556
duke@435 557 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
duke@435 558 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
duke@435 559
duke@435 560 result += partial_obj_size + live_to_left;
duke@435 561 assert(result <= addr, "object cannot move to the right");
duke@435 562 return result;
duke@435 563 }
duke@435 564
duke@435 565 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
duke@435 566 klassOop updated_klass;
duke@435 567 if (PSParallelCompact::should_update_klass(old_klass)) {
duke@435 568 updated_klass = (klassOop) calc_new_pointer(old_klass);
duke@435 569 } else {
duke@435 570 updated_klass = old_klass;
duke@435 571 }
duke@435 572
duke@435 573 return updated_klass;
duke@435 574 }
duke@435 575
duke@435 576 #ifdef ASSERT
duke@435 577 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
duke@435 578 {
duke@435 579 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
duke@435 580 const size_t* const end = (const size_t*)vspace->committed_high_addr();
duke@435 581 for (const size_t* p = beg; p < end; ++p) {
duke@435 582 assert(*p == 0, "not zero");
duke@435 583 }
duke@435 584 }
duke@435 585
duke@435 586 void ParallelCompactData::verify_clear()
duke@435 587 {
jcoomes@810 588 verify_clear(_region_vspace);
duke@435 589 }
duke@435 590 #endif // #ifdef ASSERT
duke@435 591
duke@435 592 #ifdef NOT_PRODUCT
jcoomes@810 593 ParallelCompactData::RegionData* debug_region(size_t region_index) {
duke@435 594 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 595 return sd.region(region_index);
duke@435 596 }
duke@435 597 #endif
duke@435 598
duke@435 599 elapsedTimer PSParallelCompact::_accumulated_time;
duke@435 600 unsigned int PSParallelCompact::_total_invocations = 0;
duke@435 601 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
duke@435 602 jlong PSParallelCompact::_time_of_last_gc = 0;
duke@435 603 CollectorCounters* PSParallelCompact::_counters = NULL;
duke@435 604 ParMarkBitMap PSParallelCompact::_mark_bitmap;
duke@435 605 ParallelCompactData PSParallelCompact::_summary_data;
duke@435 606
duke@435 607 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
coleenp@548 608
coleenp@548 609 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
coleenp@548 610 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
coleenp@548 611
coleenp@548 612 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 613 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 614
duke@435 615 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
duke@435 616 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
duke@435 617
coleenp@548 618 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
coleenp@548 619 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
coleenp@548 620
coleenp@548 621 void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
coleenp@548 622
coleenp@548 623 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
coleenp@548 624 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
duke@435 625
duke@435 626 void PSParallelCompact::post_initialize() {
duke@435 627 ParallelScavengeHeap* heap = gc_heap();
duke@435 628 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 629
duke@435 630 MemRegion mr = heap->reserved_region();
duke@435 631 _ref_processor = ReferenceProcessor::create_ref_processor(
duke@435 632 mr, // span
duke@435 633 true, // atomic_discovery
duke@435 634 true, // mt_discovery
duke@435 635 &_is_alive_closure,
duke@435 636 ParallelGCThreads,
duke@435 637 ParallelRefProcEnabled);
duke@435 638 _counters = new CollectorCounters("PSParallelCompact", 1);
duke@435 639
duke@435 640 // Initialize static fields in ParCompactionManager.
duke@435 641 ParCompactionManager::initialize(mark_bitmap());
duke@435 642 }
duke@435 643
duke@435 644 bool PSParallelCompact::initialize() {
duke@435 645 ParallelScavengeHeap* heap = gc_heap();
duke@435 646 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 647 MemRegion mr = heap->reserved_region();
duke@435 648
duke@435 649 // Was the old gen allocated successfully?
duke@435 650 if (!heap->old_gen()->is_allocated()) {
duke@435 651 return false;
duke@435 652 }
duke@435 653
duke@435 654 initialize_space_info();
duke@435 655 initialize_dead_wood_limiter();
duke@435 656
duke@435 657 if (!_mark_bitmap.initialize(mr)) {
duke@435 658 vm_shutdown_during_initialization("Unable to allocate bit map for "
duke@435 659 "parallel garbage collection for the requested heap size.");
duke@435 660 return false;
duke@435 661 }
duke@435 662
duke@435 663 if (!_summary_data.initialize(mr)) {
duke@435 664 vm_shutdown_during_initialization("Unable to allocate tables for "
duke@435 665 "parallel garbage collection for the requested heap size.");
duke@435 666 return false;
duke@435 667 }
duke@435 668
duke@435 669 return true;
duke@435 670 }
duke@435 671
duke@435 672 void PSParallelCompact::initialize_space_info()
duke@435 673 {
duke@435 674 memset(&_space_info, 0, sizeof(_space_info));
duke@435 675
duke@435 676 ParallelScavengeHeap* heap = gc_heap();
duke@435 677 PSYoungGen* young_gen = heap->young_gen();
duke@435 678 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 679
duke@435 680 _space_info[perm_space_id].set_space(perm_space);
duke@435 681 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
duke@435 682 _space_info[eden_space_id].set_space(young_gen->eden_space());
duke@435 683 _space_info[from_space_id].set_space(young_gen->from_space());
duke@435 684 _space_info[to_space_id].set_space(young_gen->to_space());
duke@435 685
duke@435 686 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
duke@435 687 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
duke@435 688
duke@435 689 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
duke@435 690 if (TraceParallelOldGCDensePrefix) {
duke@435 691 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
duke@435 692 _space_info[perm_space_id].min_dense_prefix());
duke@435 693 }
duke@435 694 }
duke@435 695
duke@435 696 void PSParallelCompact::initialize_dead_wood_limiter()
duke@435 697 {
duke@435 698 const size_t max = 100;
duke@435 699 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
duke@435 700 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
duke@435 701 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
duke@435 702 DEBUG_ONLY(_dwl_initialized = true;)
duke@435 703 _dwl_adjustment = normal_distribution(1.0);
duke@435 704 }
duke@435 705
duke@435 706 // Simple class for storing info about the heap at the start of GC, to be used
duke@435 707 // after GC for comparison/printing.
duke@435 708 class PreGCValues {
duke@435 709 public:
duke@435 710 PreGCValues() { }
duke@435 711 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
duke@435 712
duke@435 713 void fill(ParallelScavengeHeap* heap) {
duke@435 714 _heap_used = heap->used();
duke@435 715 _young_gen_used = heap->young_gen()->used_in_bytes();
duke@435 716 _old_gen_used = heap->old_gen()->used_in_bytes();
duke@435 717 _perm_gen_used = heap->perm_gen()->used_in_bytes();
duke@435 718 };
duke@435 719
duke@435 720 size_t heap_used() const { return _heap_used; }
duke@435 721 size_t young_gen_used() const { return _young_gen_used; }
duke@435 722 size_t old_gen_used() const { return _old_gen_used; }
duke@435 723 size_t perm_gen_used() const { return _perm_gen_used; }
duke@435 724
duke@435 725 private:
duke@435 726 size_t _heap_used;
duke@435 727 size_t _young_gen_used;
duke@435 728 size_t _old_gen_used;
duke@435 729 size_t _perm_gen_used;
duke@435 730 };
duke@435 731
duke@435 732 void
duke@435 733 PSParallelCompact::clear_data_covering_space(SpaceId id)
duke@435 734 {
duke@435 735 // At this point, top is the value before GC, new_top() is the value that will
duke@435 736 // be set at the end of GC. The marking bitmap is cleared to top; nothing
duke@435 737 // should be marked above top. The summary data is cleared to the larger of
duke@435 738 // top & new_top.
duke@435 739 MutableSpace* const space = _space_info[id].space();
duke@435 740 HeapWord* const bot = space->bottom();
duke@435 741 HeapWord* const top = space->top();
duke@435 742 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
duke@435 743
duke@435 744 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
duke@435 745 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
duke@435 746 _mark_bitmap.clear_range(beg_bit, end_bit);
duke@435 747
jcoomes@810 748 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
jcoomes@810 749 const size_t end_region =
jcoomes@810 750 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
jcoomes@810 751 _summary_data.clear_range(beg_region, end_region);
duke@435 752 }
duke@435 753
duke@435 754 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
duke@435 755 {
duke@435 756 // Update the from & to space pointers in space_info, since they are swapped
duke@435 757 // at each young gen gc. Do the update unconditionally (even though a
duke@435 758 // promotion failure does not swap spaces) because an unknown number of minor
duke@435 759 // collections will have swapped the spaces an unknown number of times.
duke@435 760 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
duke@435 761 ParallelScavengeHeap* heap = gc_heap();
duke@435 762 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
duke@435 763 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
duke@435 764
duke@435 765 pre_gc_values->fill(heap);
duke@435 766
duke@435 767 ParCompactionManager::reset();
duke@435 768 NOT_PRODUCT(_mark_bitmap.reset_counters());
duke@435 769 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
duke@435 770 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
duke@435 771
duke@435 772 // Increment the invocation count
apetrusenko@574 773 heap->increment_total_collections(true);
duke@435 774
duke@435 775 // We need to track unique mark sweep invocations as well.
duke@435 776 _total_invocations++;
duke@435 777
duke@435 778 if (PrintHeapAtGC) {
duke@435 779 Universe::print_heap_before_gc();
duke@435 780 }
duke@435 781
duke@435 782 // Fill in TLABs
duke@435 783 heap->accumulate_statistics_all_tlabs();
duke@435 784 heap->ensure_parsability(true); // retire TLABs
duke@435 785
duke@435 786 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 787 HandleMark hm; // Discard invalid handles created during verification
duke@435 788 gclog_or_tty->print(" VerifyBeforeGC:");
duke@435 789 Universe::verify(true);
duke@435 790 }
duke@435 791
duke@435 792 // Verify object start arrays
duke@435 793 if (VerifyObjectStartArray &&
duke@435 794 VerifyBeforeGC) {
duke@435 795 heap->old_gen()->verify_object_start_array();
duke@435 796 heap->perm_gen()->verify_object_start_array();
duke@435 797 }
duke@435 798
duke@435 799 DEBUG_ONLY(mark_bitmap()->verify_clear();)
duke@435 800 DEBUG_ONLY(summary_data().verify_clear();)
jcoomes@645 801
jcoomes@645 802 // Have worker threads release resources the next time they run a task.
jcoomes@645 803 gc_task_manager()->release_all_resources();
duke@435 804 }
duke@435 805
duke@435 806 void PSParallelCompact::post_compact()
duke@435 807 {
duke@435 808 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
duke@435 809
duke@435 810 // Clear the marking bitmap and summary data and update top() in each space.
duke@435 811 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 812 clear_data_covering_space(SpaceId(id));
duke@435 813 _space_info[id].space()->set_top(_space_info[id].new_top());
duke@435 814 }
duke@435 815
duke@435 816 MutableSpace* const eden_space = _space_info[eden_space_id].space();
duke@435 817 MutableSpace* const from_space = _space_info[from_space_id].space();
duke@435 818 MutableSpace* const to_space = _space_info[to_space_id].space();
duke@435 819
duke@435 820 ParallelScavengeHeap* heap = gc_heap();
duke@435 821 bool eden_empty = eden_space->is_empty();
duke@435 822 if (!eden_empty) {
duke@435 823 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
duke@435 824 heap->young_gen(), heap->old_gen());
duke@435 825 }
duke@435 826
duke@435 827 // Update heap occupancy information which is used as input to the soft ref
duke@435 828 // clearing policy at the next gc.
duke@435 829 Universe::update_heap_info_at_gc();
duke@435 830
duke@435 831 bool young_gen_empty = eden_empty && from_space->is_empty() &&
duke@435 832 to_space->is_empty();
duke@435 833
duke@435 834 BarrierSet* bs = heap->barrier_set();
duke@435 835 if (bs->is_a(BarrierSet::ModRef)) {
duke@435 836 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
duke@435 837 MemRegion old_mr = heap->old_gen()->reserved();
duke@435 838 MemRegion perm_mr = heap->perm_gen()->reserved();
duke@435 839 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
duke@435 840
duke@435 841 if (young_gen_empty) {
duke@435 842 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 843 } else {
duke@435 844 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 845 }
duke@435 846 }
duke@435 847
duke@435 848 Threads::gc_epilogue();
duke@435 849 CodeCache::gc_epilogue();
duke@435 850
duke@435 851 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@435 852
duke@435 853 ref_processor()->enqueue_discovered_references(NULL);
duke@435 854
jmasa@698 855 if (ZapUnusedHeapArea) {
jmasa@698 856 heap->gen_mangle_unused_area();
jmasa@698 857 }
jmasa@698 858
duke@435 859 // Update time of last GC
duke@435 860 reset_millis_since_last_gc();
duke@435 861 }
duke@435 862
duke@435 863 HeapWord*
duke@435 864 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
duke@435 865 bool maximum_compaction)
duke@435 866 {
jcoomes@810 867 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 868 const ParallelCompactData& sd = summary_data();
duke@435 869
duke@435 870 const MutableSpace* const space = _space_info[id].space();
jcoomes@810 871 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
jcoomes@810 872 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
jcoomes@810 873 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 874
jcoomes@810 875 // Skip full regions at the beginning of the space--they are necessarily part
duke@435 876 // of the dense prefix.
duke@435 877 size_t full_count = 0;
jcoomes@810 878 const RegionData* cp;
jcoomes@810 879 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
duke@435 880 ++full_count;
duke@435 881 }
duke@435 882
duke@435 883 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 884 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 885 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
duke@435 886 if (maximum_compaction || cp == end_cp || interval_ended) {
duke@435 887 _maximum_compaction_gc_num = total_invocations();
jcoomes@810 888 return sd.region_to_addr(cp);
duke@435 889 }
duke@435 890
duke@435 891 HeapWord* const new_top = _space_info[id].new_top();
duke@435 892 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 893 const size_t space_used = space->used_in_words();
duke@435 894 const size_t space_capacity = space->capacity_in_words();
duke@435 895
duke@435 896 const double cur_density = double(space_live) / space_capacity;
duke@435 897 const double deadwood_density =
duke@435 898 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
duke@435 899 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
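// Worked example (illustrative): the (1 - d)^2 * d^2 term peaks at
// cur_density == 0.5, giving deadwood_density == 0.0625, i.e. at most about
// 1/16 of the space's capacity is allowed as dead wood; the goal approaches
// zero as the density nears 0 or 1.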
duke@435 900
duke@435 901 if (TraceParallelOldGCDensePrefix) {
duke@435 902 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
duke@435 903 cur_density, deadwood_density, deadwood_goal);
duke@435 904 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 905 "space_cap=" SIZE_FORMAT,
duke@435 906 space_live, space_used,
duke@435 907 space_capacity);
duke@435 908 }
duke@435 909
duke@435 910 // XXX - Use binary search?
jcoomes@810 911 HeapWord* dense_prefix = sd.region_to_addr(cp);
jcoomes@810 912 const RegionData* full_cp = cp;
jcoomes@810 913 const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
duke@435 914 while (cp < end_cp) {
jcoomes@810 915 HeapWord* region_destination = cp->destination();
jcoomes@810 916 const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
duke@435 917 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 918 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
jcoomes@699 919 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
jcoomes@810 920 sd.region(cp), region_destination,
duke@435 921 dense_prefix, cur_deadwood);
duke@435 922 }
duke@435 923
duke@435 924 if (cur_deadwood >= deadwood_goal) {
jcoomes@810 925 // Found the region that has the correct amount of deadwood to the left.
jcoomes@810 926 // This typically occurs after crossing a fairly sparse set of regions, so
jcoomes@810 927 // iterate backwards over those sparse regions, looking for the region
jcoomes@810 928 // that has the lowest density of live objects 'to the right.'
jcoomes@810 929 size_t space_to_left = sd.region(cp) * region_size;
duke@435 930 size_t live_to_left = space_to_left - cur_deadwood;
duke@435 931 size_t space_to_right = space_capacity - space_to_left;
duke@435 932 size_t live_to_right = space_live - live_to_left;
duke@435 933 double density_to_right = double(live_to_right) / space_to_right;
duke@435 934 while (cp > full_cp) {
duke@435 935 --cp;
jcoomes@810 936 const size_t prev_region_live_to_right = live_to_right -
jcoomes@810 937 cp->data_size();
jcoomes@810 938 const size_t prev_region_space_to_right = space_to_right + region_size;
jcoomes@810 939 double prev_region_density_to_right =
jcoomes@810 940 double(prev_region_live_to_right) / prev_region_space_to_right;
jcoomes@810 941 if (density_to_right <= prev_region_density_to_right) {
duke@435 942 return dense_prefix;
duke@435 943 }
duke@435 944 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 945 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
jcoomes@810 946 "pc_d2r=%10.8f", sd.region(cp), density_to_right,
jcoomes@810 947 prev_region_density_to_right);
duke@435 948 }
jcoomes@810 949 dense_prefix -= region_size;
jcoomes@810 950 live_to_right = prev_region_live_to_right;
jcoomes@810 951 space_to_right = prev_region_space_to_right;
jcoomes@810 952 density_to_right = prev_region_density_to_right;
duke@435 953 }
duke@435 954 return dense_prefix;
duke@435 955 }
duke@435 956
jcoomes@810 957 dense_prefix += region_size;
duke@435 958 ++cp;
duke@435 959 }
duke@435 960
duke@435 961 return dense_prefix;
duke@435 962 }
duke@435 963
duke@435 964 #ifndef PRODUCT
duke@435 965 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
duke@435 966 const SpaceId id,
duke@435 967 const bool maximum_compaction,
duke@435 968 HeapWord* const addr)
duke@435 969 {
jcoomes@810 970 const size_t region_idx = summary_data().addr_to_region_idx(addr);
jcoomes@810 971 RegionData* const cp = summary_data().region(region_idx);
duke@435 972 const MutableSpace* const space = _space_info[id].space();
duke@435 973 HeapWord* const new_top = _space_info[id].new_top();
duke@435 974
duke@435 975 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 976 const size_t dead_to_left = pointer_delta(addr, cp->destination());
duke@435 977 const size_t space_cap = space->capacity_in_words();
duke@435 978 const double dead_to_left_pct = double(dead_to_left) / space_cap;
duke@435 979 const size_t live_to_right = new_top - cp->destination();
duke@435 980 const size_t dead_to_right = space->top() - addr - live_to_right;
duke@435 981
jcoomes@699 982 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
duke@435 983 "spl=" SIZE_FORMAT " "
duke@435 984 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
duke@435 985 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
duke@435 986 " ratio=%10.8f",
jcoomes@810 987 algorithm, addr, region_idx,
duke@435 988 space_live,
duke@435 989 dead_to_left, dead_to_left_pct,
duke@435 990 dead_to_right, live_to_right,
duke@435 991 double(dead_to_right) / live_to_right);
duke@435 992 }
duke@435 993 #endif // #ifndef PRODUCT
duke@435 994
duke@435 995 // Return a fraction indicating how much of the generation can be treated as
duke@435 996 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
duke@435 997 // based on the density of live objects in the generation to determine a limit,
duke@435 998 // which is then adjusted so the return value is min_percent when the density is
duke@435 999 // 1.
duke@435 1000 //
duke@435 1001 // The following table shows some return values for different values of the
duke@435 1002 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
duke@435 1003 // min_percent is 1.
duke@435 1004 //
duke@435 1005 // fraction allowed as dead wood
duke@435 1006 // -----------------------------------------------------------------
duke@435 1007 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
duke@435 1008 // ------- ---------- ---------- ---------- ---------- ---------- ----------
duke@435 1009 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1010 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1011 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1012 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1013 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1014 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1015 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1016 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1017 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1018 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1019 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
duke@435 1020 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1021 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1022 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1023 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1024 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1025 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1026 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1027 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1028 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1029 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1030
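// In formula form (assuming normal_distribution() is the Gaussian density
// with mean _dwl_mean and standard deviation _dwl_std_dev, as the
// _dwl_first_term computation in initialize_dead_wood_limiter() suggests):
//
//   N(x) = exp(-0.5 * ((x - mean) / std_dev)^2) / (std_dev * sqrt(2 * pi))
//   dead_wood_limiter(d, min_percent) = max(0, N(d) - N(1.0) + min_percent / 100)
//
// The table rows above are this value for mean == 0.5 and min_percent == 1.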
duke@435 1031 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
duke@435 1032 {
duke@435 1033 assert(_dwl_initialized, "uninitialized");
duke@435 1034
duke@435 1035 // The raw limit is the value of the normal distribution at x = density.
duke@435 1036 const double raw_limit = normal_distribution(density);
duke@435 1037
duke@435 1038 // Adjust the raw limit so it becomes the minimum when the density is 1.
duke@435 1039 //
duke@435 1040 // First subtract the adjustment value (which is simply the precomputed value
duke@435 1041 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
duke@435 1042 // Then add the minimum value, so the minimum is returned when the density is
duke@435 1043 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
duke@435 1044 const double min = double(min_percent) / 100.0;
duke@435 1045 const double limit = raw_limit - _dwl_adjustment + min;
duke@435 1046 return MAX2(limit, 0.0);
duke@435 1047 }
duke@435 1048
jcoomes@810 1049 ParallelCompactData::RegionData*
jcoomes@810 1050 PSParallelCompact::first_dead_space_region(const RegionData* beg,
jcoomes@810 1051 const RegionData* end)
duke@435 1052 {
jcoomes@810 1053 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 1054 ParallelCompactData& sd = summary_data();
jcoomes@810 1055 size_t left = sd.region(beg);
jcoomes@810 1056 size_t right = end > beg ? sd.region(end) - 1 : left;
duke@435 1057
duke@435 1058 // Binary search.
duke@435 1059 while (left < right) {
duke@435 1060 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1061 const size_t middle = left + (right - left) / 2;
jcoomes@810 1062 RegionData* const middle_ptr = sd.region(middle);
duke@435 1063 HeapWord* const dest = middle_ptr->destination();
jcoomes@810 1064 HeapWord* const addr = sd.region_to_addr(middle);
duke@435 1065 assert(dest != NULL, "sanity");
duke@435 1066 assert(dest <= addr, "must move left");
duke@435 1067
duke@435 1068 if (middle > left && dest < addr) {
duke@435 1069 right = middle - 1;
jcoomes@810 1070 } else if (middle < right && middle_ptr->data_size() == region_size) {
duke@435 1071 left = middle + 1;
duke@435 1072 } else {
duke@435 1073 return middle_ptr;
duke@435 1074 }
duke@435 1075 }
jcoomes@810 1076 return sd.region(left);
duke@435 1077 }
duke@435 1078
jcoomes@810 1079 ParallelCompactData::RegionData*
jcoomes@810 1080 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
jcoomes@810 1081 const RegionData* end,
jcoomes@810 1082 size_t dead_words)
duke@435 1083 {
duke@435 1084 ParallelCompactData& sd = summary_data();
jcoomes@810 1085 size_t left = sd.region(beg);
jcoomes@810 1086 size_t right = end > beg ? sd.region(end) - 1 : left;
duke@435 1087
duke@435 1088 // Binary search.
duke@435 1089 while (left < right) {
duke@435 1090 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1091 const size_t middle = left + (right - left) / 2;
jcoomes@810 1092 RegionData* const middle_ptr = sd.region(middle);
duke@435 1093 HeapWord* const dest = middle_ptr->destination();
jcoomes@810 1094 HeapWord* const addr = sd.region_to_addr(middle);
duke@435 1095 assert(dest != NULL, "sanity");
duke@435 1096 assert(dest <= addr, "must move left");
duke@435 1097
duke@435 1098 const size_t dead_to_left = pointer_delta(addr, dest);
duke@435 1099 if (middle > left && dead_to_left > dead_words) {
duke@435 1100 right = middle - 1;
duke@435 1101 } else if (middle < right && dead_to_left < dead_words) {
duke@435 1102 left = middle + 1;
duke@435 1103 } else {
duke@435 1104 return middle_ptr;
duke@435 1105 }
duke@435 1106 }
jcoomes@810 1107 return sd.region(left);
duke@435 1108 }
duke@435 1109
duke@435 1110 // The result is valid during the summary phase, after the initial summarization
duke@435 1111 // of each space into itself, and before final summarization.
duke@435 1112 inline double
jcoomes@810 1113 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
duke@435 1114 HeapWord* const bottom,
duke@435 1115 HeapWord* const top,
duke@435 1116 HeapWord* const new_top)
duke@435 1117 {
duke@435 1118 ParallelCompactData& sd = summary_data();
duke@435 1119
duke@435 1120 assert(cp != NULL, "sanity");
duke@435 1121 assert(bottom != NULL, "sanity");
duke@435 1122 assert(top != NULL, "sanity");
duke@435 1123 assert(new_top != NULL, "sanity");
duke@435 1124 assert(top >= new_top, "summary data problem?");
duke@435 1125 assert(new_top > bottom, "space is empty; should not be here");
duke@435 1126 assert(new_top >= cp->destination(), "sanity");
jcoomes@810 1127 assert(top >= sd.region_to_addr(cp), "sanity");
duke@435 1128
duke@435 1129 HeapWord* const destination = cp->destination();
duke@435 1130 const size_t dense_prefix_live = pointer_delta(destination, bottom);
duke@435 1131 const size_t compacted_region_live = pointer_delta(new_top, destination);
jcoomes@810 1132 const size_t compacted_region_used = pointer_delta(top,
jcoomes@810 1133 sd.region_to_addr(cp));
duke@435 1134 const size_t reclaimable = compacted_region_used - compacted_region_live;
duke@435 1135
duke@435 1136 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
duke@435 1137 return double(reclaimable) / divisor;
duke@435 1138 }
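
// A worked example of the ratio above, with illustrative sizes (all in words): if
// dense_prefix_live is 10000, compacted_region_live is 4000 and
// compacted_region_used is 12000, then reclaimable = 12000 - 4000 = 8000 and the
// divisor is 10000 + 1.25 * 4000 = 15000, giving a ratio of roughly 0.53.  The
// 1.25 factor appears to weight live data that must be copied more heavily than
// live data in the dense prefix, which stays in place.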
duke@435 1139
duke@435 1140 // Return the address of the end of the dense prefix, a.k.a. the start of the
jcoomes@810 1141 // compacted region. The address is always on a region boundary.
duke@435 1142 //
jcoomes@810 1143 // Completely full regions at the left are skipped, since no compaction can
jcoomes@810 1144 // occur in those regions. Then the maximum amount of dead wood to allow is
jcoomes@810 1145 // computed, based on the density (amount live / capacity) of the generation;
jcoomes@810 1146 // the region with approximately that amount of dead space to the left is
jcoomes@810 1147 // identified as the limit region. Regions between the last completely full
jcoomes@810 1148 // region and the limit region are scanned and the one that has the best
jcoomes@810 1149 // (maximum) reclaimed_ratio() is selected.
duke@435 1150 HeapWord*
duke@435 1151 PSParallelCompact::compute_dense_prefix(const SpaceId id,
duke@435 1152 bool maximum_compaction)
duke@435 1153 {
jcoomes@810 1154 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 1155 const ParallelCompactData& sd = summary_data();
duke@435 1156
duke@435 1157 const MutableSpace* const space = _space_info[id].space();
duke@435 1158 HeapWord* const top = space->top();
jcoomes@810 1159 HeapWord* const top_aligned_up = sd.region_align_up(top);
duke@435 1160 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1161 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
duke@435 1162 HeapWord* const bottom = space->bottom();
jcoomes@810 1163 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
jcoomes@810 1164 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 1165 const RegionData* const new_top_cp =
jcoomes@810 1166 sd.addr_to_region_ptr(new_top_aligned_up);
jcoomes@810 1167
jcoomes@810 1168 // Skip full regions at the beginning of the space--they are necessarily part
duke@435 1169 // of the dense prefix.
jcoomes@810 1170 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
jcoomes@810 1171 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
duke@435 1172 space->is_empty(), "no dead space allowed to the left");
jcoomes@810 1173 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
jcoomes@810 1174 "region must have dead space");
duke@435 1175
duke@435 1176 // The gc number is saved whenever a maximum compaction is done, and used to
duke@435 1177 // determine when the maximum compaction interval has expired. This avoids
duke@435 1178 // successive max compactions for different reasons.
duke@435 1179 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 1180 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 1181 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
duke@435 1182 total_invocations() == HeapFirstMaximumCompactionCount;
duke@435 1183 if (maximum_compaction || full_cp == top_cp || interval_ended) {
duke@435 1184 _maximum_compaction_gc_num = total_invocations();
jcoomes@810 1185 return sd.region_to_addr(full_cp);
duke@435 1186 }
duke@435 1187
duke@435 1188 const size_t space_live = pointer_delta(new_top, bottom);
duke@435 1189 const size_t space_used = space->used_in_words();
duke@435 1190 const size_t space_capacity = space->capacity_in_words();
duke@435 1191
duke@435 1192 const double density = double(space_live) / double(space_capacity);
duke@435 1193 const size_t min_percent_free =
duke@435 1194 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
duke@435 1195 const double limiter = dead_wood_limiter(density, min_percent_free);
duke@435 1196 const size_t dead_wood_max = space_used - space_live;
duke@435 1197 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
duke@435 1198 dead_wood_max);
duke@435 1199
duke@435 1200 if (TraceParallelOldGCDensePrefix) {
duke@435 1201 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 1202 "space_cap=" SIZE_FORMAT,
duke@435 1203 space_live, space_used,
duke@435 1204 space_capacity);
duke@435 1205     tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
duke@435 1206 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
duke@435 1207 density, min_percent_free, limiter,
duke@435 1208 dead_wood_max, dead_wood_limit);
duke@435 1209 }
duke@435 1210
jcoomes@810 1211 // Locate the region with the desired amount of dead space to the left.
jcoomes@810 1212 const RegionData* const limit_cp =
jcoomes@810 1213 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
jcoomes@810 1214
jcoomes@810 1215 // Scan from the first region with dead space to the limit region and find the
duke@435 1216 // one with the best (largest) reclaimed ratio.
duke@435 1217 double best_ratio = 0.0;
jcoomes@810 1218 const RegionData* best_cp = full_cp;
jcoomes@810 1219 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
duke@435 1220 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
duke@435 1221 if (tmp_ratio > best_ratio) {
duke@435 1222 best_cp = cp;
duke@435 1223 best_ratio = tmp_ratio;
duke@435 1224 }
duke@435 1225 }
duke@435 1226
duke@435 1227 #if 0
jcoomes@810 1228 // Something to consider: if the region with the best ratio is 'close to' the
jcoomes@810 1229 // first region w/free space, choose the first region with free space
jcoomes@810 1230 // ("first-free"). The first-free region is usually near the start of the
duke@435 1231 // heap, which means we are copying most of the heap already, so copy a bit
duke@435 1232 // more to get complete compaction.
jcoomes@810 1233 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
duke@435 1234 _maximum_compaction_gc_num = total_invocations();
duke@435 1235 best_cp = full_cp;
duke@435 1236 }
duke@435 1237 #endif // #if 0
duke@435 1238
jcoomes@810 1239 return sd.region_to_addr(best_cp);
duke@435 1240 }
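
// A worked example of the dead wood limit computed above, with assumed numbers:
// for a space with capacity 100000 words and 80000 live words, density is 0.8; if
// dead_wood_limiter(0.8, MarkSweepDeadRatio) were to return 0.05 (the value is
// policy-dependent), dead_wood_limit would be MIN2(size_t(100000 * 0.05),
// dead_wood_max) = MIN2(5000, space_used - space_live) words.  The limit region is
// then the region with roughly that much dead space to its left, and the dense
// prefix ends at the candidate between full_cp and the limit region with the best
// reclaimed_ratio().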
duke@435 1241
duke@435 1242 void PSParallelCompact::summarize_spaces_quick()
duke@435 1243 {
duke@435 1244 for (unsigned int i = 0; i < last_space_id; ++i) {
duke@435 1245 const MutableSpace* space = _space_info[i].space();
duke@435 1246 bool result = _summary_data.summarize(space->bottom(), space->end(),
duke@435 1247 space->bottom(), space->top(),
duke@435 1248 _space_info[i].new_top_addr());
duke@435 1249 assert(result, "should never fail");
duke@435 1250 _space_info[i].set_dense_prefix(space->bottom());
duke@435 1251 }
duke@435 1252 }
duke@435 1253
duke@435 1254 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
duke@435 1255 {
duke@435 1256 HeapWord* const dense_prefix_end = dense_prefix(id);
jcoomes@810 1257 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
duke@435 1258 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
jcoomes@810 1259 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
duke@435 1260 // Only enough dead space is filled so that any remaining dead space to the
duke@435 1261 // left is larger than the minimum filler object. (The remainder is filled
duke@435 1262 // during the copy/update phase.)
duke@435 1263 //
duke@435 1264 // The size of the dead space to the right of the boundary is not a
duke@435 1265 // concern, since compaction will be able to use whatever space is
duke@435 1266 // available.
duke@435 1267 //
duke@435 1268 // Here '||' is the boundary, 'x' represents a don't care bit and a box
duke@435 1269 // surrounds the space to be filled with an object.
duke@435 1270 //
duke@435 1271 // In the 32-bit VM, each bit represents two 32-bit words:
duke@435 1272 // +---+
duke@435 1273 // a) beg_bits: ... x x x | 0 | || 0 x x ...
duke@435 1274 // end_bits: ... x x x | 0 | || 0 x x ...
duke@435 1275 // +---+
duke@435 1276 //
duke@435 1277 // In the 64-bit VM, each bit represents one 64-bit word:
duke@435 1278 // +------------+
duke@435 1279 // b) beg_bits: ... x x x | 0 || 0 | x x ...
duke@435 1280 // end_bits: ... x x 1 | 0 || 0 | x x ...
duke@435 1281 // +------------+
duke@435 1282 // +-------+
duke@435 1283 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
duke@435 1284 // end_bits: ... x 1 | 0 0 | || 0 x x ...
duke@435 1285 // +-------+
duke@435 1286 // +-----------+
duke@435 1287 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
duke@435 1288 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
duke@435 1289 // +-----------+
duke@435 1290 // +-------+
duke@435 1291 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1292 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1293 // +-------+
duke@435 1294
duke@435 1295 // Initially assume case a, c or e will apply.
duke@435 1296 size_t obj_len = (size_t)oopDesc::header_size();
duke@435 1297 HeapWord* obj_beg = dense_prefix_end - obj_len;
duke@435 1298
duke@435 1299 #ifdef _LP64
duke@435 1300 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
duke@435 1301 // Case b above.
duke@435 1302 obj_beg = dense_prefix_end - 1;
duke@435 1303 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
duke@435 1304 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
duke@435 1305 // Case d above.
duke@435 1306 obj_beg = dense_prefix_end - 3;
duke@435 1307 obj_len = 3;
duke@435 1308 }
duke@435 1309 #endif // #ifdef _LP64
duke@435 1310
duke@435 1311 MemRegion region(obj_beg, obj_len);
duke@435 1312 SharedHeap::fill_region_with_object(region);
duke@435 1313 _mark_bitmap.mark_obj(obj_beg, obj_len);
duke@435 1314 _summary_data.add_obj(obj_beg, obj_len);
duke@435 1315 assert(start_array(id) != NULL, "sanity");
duke@435 1316 start_array(id)->allocate_block(obj_beg);
duke@435 1317 }
duke@435 1318 }
duke@435 1319
duke@435 1320 void
duke@435 1321 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
duke@435 1322 {
duke@435 1323 assert(id < last_space_id, "id out of range");
jcoomes@700 1324 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
jcoomes@700 1325 "should have been set in summarize_spaces_quick()");
duke@435 1326
duke@435 1327 const MutableSpace* space = _space_info[id].space();
jcoomes@700 1328 if (_space_info[id].new_top() != space->bottom()) {
jcoomes@700 1329 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
jcoomes@700 1330 _space_info[id].set_dense_prefix(dense_prefix_end);
duke@435 1331
duke@435 1332 #ifndef PRODUCT
jcoomes@700 1333 if (TraceParallelOldGCDensePrefix) {
jcoomes@700 1334 print_dense_prefix_stats("ratio", id, maximum_compaction,
jcoomes@700 1335 dense_prefix_end);
jcoomes@700 1336 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
jcoomes@700 1337 print_dense_prefix_stats("density", id, maximum_compaction, addr);
jcoomes@700 1338 }
jcoomes@700 1339 #endif // #ifndef PRODUCT
jcoomes@700 1340
jcoomes@700 1341 // If dead space crosses the dense prefix boundary, it is (at least
jcoomes@700 1342 // partially) filled with a dummy object, marked live and added to the
jcoomes@700 1343 // summary data. This simplifies the copy/update phase and must be done
jcoomes@700 1344 // before the final locations of objects are determined, to prevent leaving
jcoomes@700 1345 // a fragment of dead space that is too small to fill with an object.
jcoomes@700 1346 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
jcoomes@700 1347 fill_dense_prefix_end(id);
jcoomes@700 1348 }
jcoomes@700 1349
jcoomes@810 1350 // Compute the destination of each Region, and thus each object.
jcoomes@700 1351 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
jcoomes@700 1352 _summary_data.summarize(dense_prefix_end, space->end(),
jcoomes@700 1353 dense_prefix_end, space->top(),
jcoomes@700 1354 _space_info[id].new_top_addr());
duke@435 1355 }
duke@435 1356
duke@435 1357 if (TraceParallelOldGCSummaryPhase) {
jcoomes@810 1358 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@700 1359 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
jcoomes@810 1360 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
duke@435 1361 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
jcoomes@700 1362 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1363 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
duke@435 1364 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
duke@435 1365 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
jcoomes@810 1366 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
duke@435 1367 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
duke@435 1368 id, space->capacity_in_words(), dense_prefix_end,
jcoomes@810 1369 dp_region, dp_words / region_size,
jcoomes@810 1370 cr_words / region_size, new_top);
duke@435 1371 }
duke@435 1372 }
duke@435 1373
duke@435 1374 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
duke@435 1375 bool maximum_compaction)
duke@435 1376 {
duke@435 1377 EventMark m("2 summarize");
duke@435 1378 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
duke@435 1379 // trace("2");
duke@435 1380
duke@435 1381 #ifdef ASSERT
duke@435 1382 if (TraceParallelOldGCMarkingPhase) {
duke@435 1383 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
duke@435 1384 "add_obj_bytes=" SIZE_FORMAT,
duke@435 1385 add_obj_count, add_obj_size * HeapWordSize);
duke@435 1386 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
duke@435 1387 "mark_bitmap_bytes=" SIZE_FORMAT,
duke@435 1388 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
duke@435 1389 }
duke@435 1390 #endif // #ifdef ASSERT
duke@435 1391
duke@435 1392 // Quick summarization of each space into itself, to see how much is live.
duke@435 1393 summarize_spaces_quick();
duke@435 1394
duke@435 1395 if (TraceParallelOldGCSummaryPhase) {
duke@435 1396 tty->print_cr("summary_phase: after summarizing each space to self");
duke@435 1397 Universe::print();
jcoomes@810 1398 NOT_PRODUCT(print_region_ranges());
duke@435 1399 if (Verbose) {
duke@435 1400 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
duke@435 1401 }
duke@435 1402 }
duke@435 1403
duke@435 1404 // The amount of live data that will end up in old space (assuming it fits).
duke@435 1405 size_t old_space_total_live = 0;
duke@435 1406 unsigned int id;
duke@435 1407 for (id = old_space_id; id < last_space_id; ++id) {
duke@435 1408 old_space_total_live += pointer_delta(_space_info[id].new_top(),
duke@435 1409 _space_info[id].space()->bottom());
duke@435 1410 }
duke@435 1411
duke@435 1412 const MutableSpace* old_space = _space_info[old_space_id].space();
duke@435 1413 if (old_space_total_live > old_space->capacity_in_words()) {
duke@435 1414 // XXX - should also try to expand
duke@435 1415 maximum_compaction = true;
duke@435 1416 } else if (!UseParallelOldGCDensePrefix) {
duke@435 1417 maximum_compaction = true;
duke@435 1418 }
duke@435 1419
duke@435 1420 // Permanent and Old generations.
duke@435 1421 summarize_space(perm_space_id, maximum_compaction);
duke@435 1422 summarize_space(old_space_id, maximum_compaction);
duke@435 1423
duke@435 1424 // Summarize the remaining spaces (those in the young gen) into old space. If
duke@435 1425 // the live data from a space doesn't fit, the existing summarization is left
duke@435 1426 // intact, so the data is compacted down within the space itself.
duke@435 1427 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
duke@435 1428 HeapWord* const target_space_end = old_space->end();
duke@435 1429 for (id = eden_space_id; id < last_space_id; ++id) {
duke@435 1430 const MutableSpace* space = _space_info[id].space();
duke@435 1431 const size_t live = pointer_delta(_space_info[id].new_top(),
duke@435 1432 space->bottom());
duke@435 1433 const size_t available = pointer_delta(target_space_end, *new_top_addr);
jcoomes@701 1434 if (live > 0 && live <= available) {
duke@435 1435 // All the live data will fit.
duke@435 1436 if (TraceParallelOldGCSummaryPhase) {
duke@435 1437 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
duke@435 1438 id, *new_top_addr);
duke@435 1439 }
duke@435 1440 _summary_data.summarize(*new_top_addr, target_space_end,
duke@435 1441 space->bottom(), space->top(),
duke@435 1442 new_top_addr);
duke@435 1443
jcoomes@810 1444 // Clear the source_region field for each region in the space.
jcoomes@701 1445 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1446 HeapWord* const clear_end = _summary_data.region_align_up(new_top);
jcoomes@810 1447 RegionData* beg_region =
jcoomes@810 1448 _summary_data.addr_to_region_ptr(space->bottom());
jcoomes@810 1449 RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
jcoomes@810 1450 while (beg_region < end_region) {
jcoomes@810 1451 beg_region->set_source_region(0);
jcoomes@810 1452 ++beg_region;
duke@435 1453 }
jcoomes@701 1454
jcoomes@701 1455 // Reset the new_top value for the space.
jcoomes@701 1456 _space_info[id].set_new_top(space->bottom());
duke@435 1457 }
duke@435 1458 }
duke@435 1459
duke@435 1460 if (TraceParallelOldGCSummaryPhase) {
duke@435 1461 tty->print_cr("summary_phase: after final summarization");
duke@435 1462 Universe::print();
jcoomes@810 1463 NOT_PRODUCT(print_region_ranges());
duke@435 1464 if (Verbose) {
duke@435 1465 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
duke@435 1466 }
duke@435 1467 }
duke@435 1468 }
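
// An illustrative pass over the young-space loop above (numbers are assumed): with
// 10000 words available between *new_top_addr and old_space->end(), an eden space
// with 6000 live words passes the "live <= available" test, is summarized into the
// old space, and *new_top_addr advances by 6000; a from space with 7000 live words
// then sees only 4000 words available, fails the test, and keeps its quick
// summarization, so its data is compacted within the space itself.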
duke@435 1469
duke@435 1470 // This method should contain all heap-specific policy for invoking a full
duke@435 1471 // collection. invoke_no_policy() will only attempt to compact the heap; it
duke@435 1472 // will do nothing further. Any need to bail out for policy reasons, to scavenge
duke@435 1473 // before the full gc, or to perform other specialized behavior must be handled here.
duke@435 1474 //
duke@435 1475 // Note that this method should only be called from the vm_thread while at a
duke@435 1476 // safepoint.
duke@435 1477 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
duke@435 1478 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 1479 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
duke@435 1480 "should be in vm thread");
duke@435 1481 ParallelScavengeHeap* heap = gc_heap();
duke@435 1482 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1483 assert(!heap->is_gc_active(), "not reentrant");
duke@435 1484
duke@435 1485 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@435 1486
duke@435 1487 // Before each allocation/collection attempt, find out from the
duke@435 1488 // policy object if GCs are, on the whole, taking too long. If so,
duke@435 1489 // bail out without attempting a collection. The exceptions are
duke@435 1490 // for explicitly requested GC's.
duke@435 1491 if (!policy->gc_time_limit_exceeded() ||
duke@435 1492 GCCause::is_user_requested_gc(gc_cause) ||
duke@435 1493 GCCause::is_serviceability_requested_gc(gc_cause)) {
duke@435 1494 IsGCActiveMark mark;
duke@435 1495
duke@435 1496 if (ScavengeBeforeFullGC) {
duke@435 1497 PSScavenge::invoke_no_policy();
duke@435 1498 }
duke@435 1499
duke@435 1500 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
duke@435 1501 }
duke@435 1502 }
duke@435 1503
jcoomes@810 1504 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
jcoomes@810 1505 size_t addr_region_index = addr_to_region_idx(addr);
jcoomes@810 1506 return region_index == addr_region_index;
duke@435 1507 }
duke@435 1508
duke@435 1509 // This method contains no policy. You should probably
duke@435 1510 // be calling invoke() instead.
duke@435 1511 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
duke@435 1512 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
duke@435 1513 assert(ref_processor() != NULL, "Sanity");
duke@435 1514
apetrusenko@574 1515 if (GC_locker::check_active_before_gc()) {
duke@435 1516 return;
duke@435 1517 }
duke@435 1518
duke@435 1519 TimeStamp marking_start;
duke@435 1520 TimeStamp compaction_start;
duke@435 1521 TimeStamp collection_exit;
duke@435 1522
duke@435 1523 ParallelScavengeHeap* heap = gc_heap();
duke@435 1524 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1525 PSYoungGen* young_gen = heap->young_gen();
duke@435 1526 PSOldGen* old_gen = heap->old_gen();
duke@435 1527 PSPermGen* perm_gen = heap->perm_gen();
duke@435 1528 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@435 1529
jmasa@698 1530 if (ZapUnusedHeapArea) {
jmasa@698 1531 // Save information needed to minimize mangling
jmasa@698 1532 heap->record_gen_tops_before_GC();
jmasa@698 1533 }
jmasa@698 1534
duke@435 1535 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
duke@435 1536
duke@435 1537 // Make sure data structures are sane, make the heap parsable, and do other
duke@435 1538 // miscellaneous bookkeeping.
duke@435 1539 PreGCValues pre_gc_values;
duke@435 1540 pre_compact(&pre_gc_values);
duke@435 1541
jcoomes@645 1542 // Get the compaction manager reserved for the VM thread.
jcoomes@645 1543 ParCompactionManager* const vmthread_cm =
jcoomes@645 1544 ParCompactionManager::manager_array(gc_task_manager()->workers());
jcoomes@645 1545
duke@435 1546 // Place after pre_compact() where the number of invocations is incremented.
duke@435 1547 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@435 1548
duke@435 1549 {
duke@435 1550 ResourceMark rm;
duke@435 1551 HandleMark hm;
duke@435 1552
duke@435 1553 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
duke@435 1554
duke@435 1555 // This is useful for debugging but don't change the output the
duke@435 1556     // customer sees.
duke@435 1557 const char* gc_cause_str = "Full GC";
duke@435 1558 if (is_system_gc && PrintGCDetails) {
duke@435 1559 gc_cause_str = "Full GC (System)";
duke@435 1560 }
duke@435 1561 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 1562 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 1563 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
duke@435 1564 TraceCollectorStats tcs(counters());
duke@435 1565 TraceMemoryManagerStats tms(true /* Full GC */);
duke@435 1566
duke@435 1567 if (TraceGen1Time) accumulated_time()->start();
duke@435 1568
duke@435 1569 // Let the size policy know we're starting
duke@435 1570 size_policy->major_collection_begin();
duke@435 1571
duke@435 1572 // When collecting the permanent generation methodOops may be moving,
duke@435 1573 // so we either have to flush all bcp data or convert it into bci.
duke@435 1574 CodeCache::gc_prologue();
duke@435 1575 Threads::gc_prologue();
duke@435 1576
duke@435 1577 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 1578 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 1579
duke@435 1580 ref_processor()->enable_discovery();
ysr@888 1581 ref_processor()->snap_policy(maximum_heap_compaction);
duke@435 1582
duke@435 1583 bool marked_for_unloading = false;
duke@435 1584
duke@435 1585 marking_start.update();
jcoomes@645 1586 marking_phase(vmthread_cm, maximum_heap_compaction);
duke@435 1587
duke@435 1588 #ifndef PRODUCT
duke@435 1589 if (TraceParallelOldGCMarkingPhase) {
duke@435 1590 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
duke@435 1591 "cas_by_another %d",
duke@435 1592 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
duke@435 1593 mark_bitmap()->cas_by_another());
duke@435 1594 }
duke@435 1595 #endif // #ifndef PRODUCT
duke@435 1596
duke@435 1597 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
jcoomes@645 1598 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
duke@435 1599
duke@435 1600 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
duke@435 1601 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
duke@435 1602
duke@435 1603 // adjust_roots() updates Universe::_intArrayKlassObj which is
duke@435 1604 // needed by the compaction for filling holes in the dense prefix.
duke@435 1605 adjust_roots();
duke@435 1606
duke@435 1607 compaction_start.update();
duke@435 1608 // Does the perm gen always have to be done serially because
duke@435 1609 // klasses are used in the update of an object?
jcoomes@645 1610 compact_perm(vmthread_cm);
duke@435 1611
duke@435 1612 if (UseParallelOldGCCompacting) {
duke@435 1613 compact();
duke@435 1614 } else {
jcoomes@645 1615 compact_serial(vmthread_cm);
duke@435 1616 }
duke@435 1617
duke@435 1618 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
duke@435 1619 // done before resizing.
duke@435 1620 post_compact();
duke@435 1621
duke@435 1622 // Let the size policy know we're done
duke@435 1623 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
duke@435 1624
duke@435 1625 if (UseAdaptiveSizePolicy) {
duke@435 1626 if (PrintAdaptiveSizePolicy) {
duke@435 1627 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@435 1628 gclog_or_tty->stamp();
duke@435 1629 gclog_or_tty->print_cr(" collection: %d ",
duke@435 1630 heap->total_collections());
duke@435 1631 if (Verbose) {
duke@435 1632           gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT
duke@435 1633             " perm_gen_capacity: " SIZE_FORMAT " ",
duke@435 1634 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@435 1635 perm_gen->capacity_in_bytes());
duke@435 1636 }
duke@435 1637 }
duke@435 1638
duke@435 1639 // Don't check if the size_policy is ready here. Let
duke@435 1640 // the size_policy check that internally.
duke@435 1641 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
duke@435 1642 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@435 1643 UseAdaptiveSizePolicyWithSystemGC)) {
duke@435 1644 // Calculate optimal free space amounts
duke@435 1645 assert(young_gen->max_size() >
duke@435 1646 young_gen->from_space()->capacity_in_bytes() +
duke@435 1647 young_gen->to_space()->capacity_in_bytes(),
duke@435 1648 "Sizes of space in young gen are out-of-bounds");
duke@435 1649 size_t max_eden_size = young_gen->max_size() -
duke@435 1650 young_gen->from_space()->capacity_in_bytes() -
duke@435 1651 young_gen->to_space()->capacity_in_bytes();
jmasa@698 1652 size_policy->compute_generation_free_space(
jmasa@698 1653 young_gen->used_in_bytes(),
jmasa@698 1654 young_gen->eden_space()->used_in_bytes(),
jmasa@698 1655 old_gen->used_in_bytes(),
jmasa@698 1656 perm_gen->used_in_bytes(),
jmasa@698 1657 young_gen->eden_space()->capacity_in_bytes(),
jmasa@698 1658 old_gen->max_gen_size(),
jmasa@698 1659 max_eden_size,
jmasa@698 1660 true /* full gc*/,
jmasa@698 1661 gc_cause);
jmasa@698 1662
jmasa@698 1663 heap->resize_old_gen(
jmasa@698 1664 size_policy->calculated_old_free_size_in_bytes());
duke@435 1665
duke@435 1666       // Don't resize the young generation at a major collection. A
duke@435 1667 // desired young generation size may have been calculated but
duke@435 1668 // resizing the young generation complicates the code because the
duke@435 1669 // resizing of the old generation may have moved the boundary
duke@435 1670 // between the young generation and the old generation. Let the
duke@435 1671 // young generation resizing happen at the minor collections.
duke@435 1672 }
duke@435 1673 if (PrintAdaptiveSizePolicy) {
duke@435 1674 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@435 1675 heap->total_collections());
duke@435 1676 }
duke@435 1677 }
duke@435 1678
duke@435 1679 if (UsePerfData) {
duke@435 1680 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
duke@435 1681 counters->update_counters();
duke@435 1682 counters->update_old_capacity(old_gen->capacity_in_bytes());
duke@435 1683 counters->update_young_capacity(young_gen->capacity_in_bytes());
duke@435 1684 }
duke@435 1685
duke@435 1686 heap->resize_all_tlabs();
duke@435 1687
duke@435 1688 // We collected the perm gen, so we'll resize it here.
duke@435 1689 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
duke@435 1690
duke@435 1691 if (TraceGen1Time) accumulated_time()->stop();
duke@435 1692
duke@435 1693 if (PrintGC) {
duke@435 1694 if (PrintGCDetails) {
duke@435 1695 // No GC timestamp here. This is after GC so it would be confusing.
duke@435 1696 young_gen->print_used_change(pre_gc_values.young_gen_used());
duke@435 1697 old_gen->print_used_change(pre_gc_values.old_gen_used());
duke@435 1698 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 1699 // Print perm gen last (print_heap_change() excludes the perm gen).
duke@435 1700 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
duke@435 1701 } else {
duke@435 1702 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 1703 }
duke@435 1704 }
duke@435 1705
duke@435 1706 // Track memory usage and detect low memory
duke@435 1707 MemoryService::track_memory_usage();
duke@435 1708 heap->update_counters();
duke@435 1709
duke@435 1710 if (PrintGCDetails) {
duke@435 1711 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
duke@435 1712 if (size_policy->gc_time_limit_exceeded()) {
duke@435 1713 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
duke@435 1714 "of %d%%", GCTimeLimit);
duke@435 1715 } else {
duke@435 1716 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
duke@435 1717 "of %d%%", GCTimeLimit);
duke@435 1718 }
duke@435 1719 }
duke@435 1720 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
duke@435 1721 }
duke@435 1722 }
duke@435 1723
duke@435 1724 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 1725 HandleMark hm; // Discard invalid handles created during verification
duke@435 1726 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 1727 Universe::verify(false);
duke@435 1728 }
duke@435 1729
duke@435 1730 // Re-verify object start arrays
duke@435 1731 if (VerifyObjectStartArray &&
duke@435 1732 VerifyAfterGC) {
duke@435 1733 old_gen->verify_object_start_array();
duke@435 1734 perm_gen->verify_object_start_array();
duke@435 1735 }
duke@435 1736
jmasa@698 1737 if (ZapUnusedHeapArea) {
jmasa@698 1738 old_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 1739 perm_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 1740 }
jmasa@698 1741
duke@435 1742 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 1743
duke@435 1744 collection_exit.update();
duke@435 1745
duke@435 1746 if (PrintHeapAtGC) {
duke@435 1747 Universe::print_heap_after_gc();
duke@435 1748 }
duke@435 1749 if (PrintGCTaskTimeStamps) {
duke@435 1750 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
duke@435 1751 INT64_FORMAT,
duke@435 1752 marking_start.ticks(), compaction_start.ticks(),
duke@435 1753 collection_exit.ticks());
duke@435 1754 gc_task_manager()->print_task_time_stamps();
duke@435 1755 }
duke@435 1756 }
duke@435 1757
duke@435 1758 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 1759 PSYoungGen* young_gen,
duke@435 1760 PSOldGen* old_gen) {
duke@435 1761 MutableSpace* const eden_space = young_gen->eden_space();
duke@435 1762 assert(!eden_space->is_empty(), "eden must be non-empty");
duke@435 1763 assert(young_gen->virtual_space()->alignment() ==
duke@435 1764 old_gen->virtual_space()->alignment(), "alignments do not match");
duke@435 1765
duke@435 1766 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
duke@435 1767 return false;
duke@435 1768 }
duke@435 1769
duke@435 1770 // Both generations must be completely committed.
duke@435 1771 if (young_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 1772 return false;
duke@435 1773 }
duke@435 1774 if (old_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 1775 return false;
duke@435 1776 }
duke@435 1777
duke@435 1778 // Figure out how much to take from eden. Include the average amount promoted
duke@435 1779 // in the total; otherwise the next young gen GC will simply bail out to a
duke@435 1780 // full GC.
duke@435 1781 const size_t alignment = old_gen->virtual_space()->alignment();
duke@435 1782 const size_t eden_used = eden_space->used_in_bytes();
duke@435 1783 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
duke@435 1784 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
duke@435 1785 const size_t eden_capacity = eden_space->capacity_in_bytes();
duke@435 1786
duke@435 1787 if (absorb_size >= eden_capacity) {
duke@435 1788 return false; // Must leave some space in eden.
duke@435 1789 }
duke@435 1790
duke@435 1791 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
duke@435 1792 if (new_young_size < young_gen->min_gen_size()) {
duke@435 1793 return false; // Respect young gen minimum size.
duke@435 1794 }
duke@435 1795
duke@435 1796 if (TraceAdaptiveGCBoundary && Verbose) {
duke@435 1797 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
duke@435 1798 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
duke@435 1799 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
duke@435 1800 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
duke@435 1801 absorb_size / K,
duke@435 1802 eden_capacity / K, (eden_capacity - absorb_size) / K,
duke@435 1803 young_gen->from_space()->used_in_bytes() / K,
duke@435 1804 young_gen->to_space()->used_in_bytes() / K,
duke@435 1805 young_gen->capacity_in_bytes() / K, new_young_size / K);
duke@435 1806 }
duke@435 1807
duke@435 1808 // Fill the unused part of the old gen.
duke@435 1809 MutableSpace* const old_space = old_gen->object_space();
duke@435 1810 MemRegion old_gen_unused(old_space->top(), old_space->end());
duke@435 1811 if (!old_gen_unused.is_empty()) {
duke@435 1812 SharedHeap::fill_region_with_object(old_gen_unused);
duke@435 1813 }
duke@435 1814
duke@435 1815 // Take the live data from eden and set both top and end in the old gen to
duke@435 1816 // eden top. (Need to set end because reset_after_change() mangles the region
duke@435 1817 // from end to virtual_space->high() in debug builds).
duke@435 1818 HeapWord* const new_top = eden_space->top();
duke@435 1819 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
duke@435 1820 absorb_size);
duke@435 1821 young_gen->reset_after_change();
duke@435 1822 old_space->set_top(new_top);
duke@435 1823 old_space->set_end(new_top);
duke@435 1824 old_gen->reset_after_change();
duke@435 1825
duke@435 1826 // Update the object start array for the filler object and the data from eden.
duke@435 1827 ObjectStartArray* const start_array = old_gen->start_array();
duke@435 1828 HeapWord* const start = old_gen_unused.start();
duke@435 1829 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
duke@435 1830 start_array->allocate_block(addr);
duke@435 1831 }
duke@435 1832
duke@435 1833 // Could update the promoted average here, but it is not typically updated at
duke@435 1834 // full GCs and the value to use is unclear. Something like
duke@435 1835 //
duke@435 1836 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
duke@435 1837
duke@435 1838 size_policy->set_bytes_absorbed_from_eden(absorb_size);
duke@435 1839 return true;
duke@435 1840 }
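
// A worked example of the absorb calculation above, with assumed sizes: if eden
// holds 30M of used data, the padded average promoted is 2M and the generation
// alignment is 64K, then absorb_size = align_size_up(30M + 2M, 64K) = 32M.  The
// absorption only proceeds if 32M is less than eden's capacity and if the young
// gen, shrunk by 32M, still meets its minimum size; otherwise the method returns
// false and the boundary is left where it is.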
duke@435 1841
duke@435 1842 GCTaskManager* const PSParallelCompact::gc_task_manager() {
duke@435 1843 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
duke@435 1844 "shouldn't return NULL");
duke@435 1845 return ParallelScavengeHeap::gc_task_manager();
duke@435 1846 }
duke@435 1847
duke@435 1848 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
duke@435 1849 bool maximum_heap_compaction) {
duke@435 1850 // Recursively traverse all live objects and mark them
duke@435 1851 EventMark m("1 mark object");
duke@435 1852 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
duke@435 1853
duke@435 1854 ParallelScavengeHeap* heap = gc_heap();
duke@435 1855 uint parallel_gc_threads = heap->gc_task_manager()->workers();
jcoomes@810 1856 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
duke@435 1857 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 1858
duke@435 1859 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 1860 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
duke@435 1861
duke@435 1862 {
duke@435 1863 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
duke@435 1864
duke@435 1865 GCTaskQueue* q = GCTaskQueue::create();
duke@435 1866
duke@435 1867 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
duke@435 1868 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
duke@435 1869 // We scan the thread roots in parallel
duke@435 1870 Threads::create_thread_roots_marking_tasks(q);
duke@435 1871 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
duke@435 1872 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
duke@435 1873 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
duke@435 1874 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
duke@435 1875 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
duke@435 1876 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
duke@435 1877
duke@435 1878 if (parallel_gc_threads > 1) {
duke@435 1879 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 1880 q->enqueue(new StealMarkingTask(&terminator));
duke@435 1881 }
duke@435 1882 }
duke@435 1883
duke@435 1884 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 1885 q->enqueue(fin);
duke@435 1886
duke@435 1887 gc_task_manager()->add_list(q);
duke@435 1888
duke@435 1889 fin->wait_for();
duke@435 1890
duke@435 1891 // We have to release the barrier tasks!
duke@435 1892 WaitForBarrierGCTask::destroy(fin);
duke@435 1893 }
duke@435 1894
duke@435 1895 // Process reference objects found during marking
duke@435 1896 {
duke@435 1897 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
duke@435 1898 if (ref_processor()->processing_is_mt()) {
duke@435 1899 RefProcTaskExecutor task_executor;
duke@435 1900 ref_processor()->process_discovered_references(
ysr@888 1901 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
ysr@888 1902 &task_executor);
duke@435 1903 } else {
duke@435 1904 ref_processor()->process_discovered_references(
ysr@888 1905 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
duke@435 1906 }
duke@435 1907 }
duke@435 1908
duke@435 1909 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
duke@435 1910 // Follow system dictionary roots and unload classes.
duke@435 1911 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
duke@435 1912
duke@435 1913 // Follow code cache roots.
duke@435 1914 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
duke@435 1915 purged_class);
duke@435 1916 follow_stack(cm); // Flush marking stack.
duke@435 1917
duke@435 1918 // Update subklass/sibling/implementor links of live klasses
duke@435 1919 // revisit_klass_stack is used in follow_weak_klass_links().
duke@435 1920 follow_weak_klass_links(cm);
duke@435 1921
duke@435 1922 // Visit symbol and interned string tables and delete unmarked oops
duke@435 1923 SymbolTable::unlink(is_alive_closure());
duke@435 1924 StringTable::unlink(is_alive_closure());
duke@435 1925
duke@435 1926 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
duke@435 1927 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
duke@435 1928 }
duke@435 1929
duke@435 1930 // This should be moved to the shared markSweep code!
duke@435 1931 class PSAlwaysTrueClosure: public BoolObjectClosure {
duke@435 1932 public:
duke@435 1933 void do_object(oop p) { ShouldNotReachHere(); }
duke@435 1934 bool do_object_b(oop p) { return true; }
duke@435 1935 };
duke@435 1936 static PSAlwaysTrueClosure always_true;
duke@435 1937
duke@435 1938 void PSParallelCompact::adjust_roots() {
duke@435 1939 // Adjust the pointers to reflect the new locations
duke@435 1940 EventMark m("3 adjust roots");
duke@435 1941 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
duke@435 1942
duke@435 1943 // General strong roots.
duke@435 1944 Universe::oops_do(adjust_root_pointer_closure());
duke@435 1945 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
duke@435 1946 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
duke@435 1947 Threads::oops_do(adjust_root_pointer_closure());
duke@435 1948 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
duke@435 1949 FlatProfiler::oops_do(adjust_root_pointer_closure());
duke@435 1950 Management::oops_do(adjust_root_pointer_closure());
duke@435 1951 JvmtiExport::oops_do(adjust_root_pointer_closure());
duke@435 1952 // SO_AllClasses
duke@435 1953 SystemDictionary::oops_do(adjust_root_pointer_closure());
duke@435 1954 vmSymbols::oops_do(adjust_root_pointer_closure());
duke@435 1955
duke@435 1956 // Now adjust pointers in remaining weak roots. (All of which should
duke@435 1957 // have been cleared if they pointed to non-surviving objects.)
duke@435 1958 // Global (weak) JNI handles
duke@435 1959 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
duke@435 1960
duke@435 1961 CodeCache::oops_do(adjust_pointer_closure());
duke@435 1962 SymbolTable::oops_do(adjust_root_pointer_closure());
duke@435 1963 StringTable::oops_do(adjust_root_pointer_closure());
duke@435 1964 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 1965 // Roots were visited so references into the young gen in roots
duke@435 1966 // may have been scanned. Process them also.
duke@435 1967 // Should the reference processor have a span that excludes
duke@435 1968 // young gen objects?
duke@435 1969 PSScavenge::reference_processor()->weak_oops_do(
duke@435 1970 adjust_root_pointer_closure());
duke@435 1971 }
duke@435 1972
duke@435 1973 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
duke@435 1974 EventMark m("4 compact perm");
duke@435 1975 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
duke@435 1976 // trace("4");
duke@435 1977
duke@435 1978 gc_heap()->perm_gen()->start_array()->reset();
duke@435 1979 move_and_update(cm, perm_space_id);
duke@435 1980 }
duke@435 1981
jcoomes@810 1982 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
jcoomes@810 1983 uint parallel_gc_threads)
jcoomes@810 1984 {
duke@435 1985 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
duke@435 1986
duke@435 1987 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
duke@435 1988 for (unsigned int j = 0; j < task_count; j++) {
duke@435 1989 q->enqueue(new DrainStacksCompactionTask());
duke@435 1990 }
duke@435 1991
jcoomes@810 1992 // Find all regions that are available (can be filled immediately) and
duke@435 1993 // distribute them to the thread stacks. The iteration is done in reverse
jcoomes@810 1994 // order (high to low) so the regions will be removed in ascending order.
duke@435 1995
duke@435 1996 const ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 1997
jcoomes@810 1998 size_t fillable_regions = 0; // A count for diagnostic purposes.
duke@435 1999 unsigned int which = 0; // The worker thread number.
duke@435 2000
duke@435 2001 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
duke@435 2002 SpaceInfo* const space_info = _space_info + id;
duke@435 2003 MutableSpace* const space = space_info->space();
duke@435 2004 HeapWord* const new_top = space_info->new_top();
duke@435 2005
jcoomes@810 2006 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
jcoomes@810 2007 const size_t end_region =
jcoomes@810 2008 sd.addr_to_region_idx(sd.region_align_up(new_top));
jcoomes@810 2009 assert(end_region > 0, "perm gen cannot be empty");
jcoomes@810 2010
jcoomes@810 2011 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
jcoomes@810 2012 if (sd.region(cur)->claim_unsafe()) {
duke@435 2013 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
duke@435 2014 cm->save_for_processing(cur);
duke@435 2015
duke@435 2016 if (TraceParallelOldGCCompactionPhase && Verbose) {
jcoomes@810 2017 const size_t count_mod_8 = fillable_regions & 7;
duke@435 2018 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
jcoomes@699 2019 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
duke@435 2020 if (count_mod_8 == 7) gclog_or_tty->cr();
duke@435 2021 }
duke@435 2022
jcoomes@810 2023 NOT_PRODUCT(++fillable_regions;)
jcoomes@810 2024
jcoomes@810 2025 // Assign regions to threads in round-robin fashion.
duke@435 2026 if (++which == task_count) {
duke@435 2027 which = 0;
duke@435 2028 }
duke@435 2029 }
duke@435 2030 }
duke@435 2031 }
duke@435 2032
duke@435 2033 if (TraceParallelOldGCCompactionPhase) {
jcoomes@810 2034 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
jcoomes@810 2035     gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
duke@435 2036 }
duke@435 2037 }
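
// A sketch of the round-robin distribution above, with assumed values: with
// task_count == 4 and fillable regions claimed in the order 57, 56, 55, 54, 53,
// the regions are pushed onto the stacks of compaction managers 0, 1, 2, 3 and
// then 0 again.  Because the iteration runs from high region indices to low, each
// manager later removes its regions in ascending order.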
duke@435 2038
duke@435 2039 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
duke@435 2040
duke@435 2041 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 2042 uint parallel_gc_threads) {
duke@435 2043 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
duke@435 2044
duke@435 2045 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2046
duke@435 2047 // Iterate over all the spaces adding tasks for updating
jcoomes@810 2048 // regions in the dense prefix. Assume that 1 gc thread
duke@435 2049 // will work on opening the gaps and the remaining gc threads
duke@435 2050 // will work on the dense prefix.
duke@435 2051 SpaceId space_id = old_space_id;
duke@435 2052 while (space_id != last_space_id) {
duke@435 2053 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
duke@435 2054 const MutableSpace* const space = _space_info[space_id].space();
duke@435 2055
duke@435 2056 if (dense_prefix_end == space->bottom()) {
duke@435 2057 // There is no dense prefix for this space.
duke@435 2058 space_id = next_compaction_space_id(space_id);
duke@435 2059 continue;
duke@435 2060 }
duke@435 2061
jcoomes@810 2062 // The dense prefix is before this region.
jcoomes@810 2063 size_t region_index_end_dense_prefix =
jcoomes@810 2064 sd.addr_to_region_idx(dense_prefix_end);
jcoomes@810 2065 RegionData* const dense_prefix_cp =
jcoomes@810 2066 sd.region(region_index_end_dense_prefix);
duke@435 2067 assert(dense_prefix_end == space->end() ||
duke@435 2068 dense_prefix_cp->available() ||
duke@435 2069 dense_prefix_cp->claimed(),
jcoomes@810 2070 "The region after the dense prefix should always be ready to fill");
jcoomes@810 2071
jcoomes@810 2072 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
duke@435 2073
duke@435 2074 // Is there dense prefix work?
jcoomes@810 2075 size_t total_dense_prefix_regions =
jcoomes@810 2076 region_index_end_dense_prefix - region_index_start;
jcoomes@810 2077 // How many regions of the dense prefix should be given to
duke@435 2078 // each thread?
jcoomes@810 2079 if (total_dense_prefix_regions > 0) {
duke@435 2080 uint tasks_for_dense_prefix = 1;
duke@435 2081 if (UseParallelDensePrefixUpdate) {
jcoomes@810 2082 if (total_dense_prefix_regions <=
duke@435 2083 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
duke@435 2084 // Don't over partition. This assumes that
duke@435 2085 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
jcoomes@810 2086 // so there are not many regions to process.
duke@435 2087 tasks_for_dense_prefix = parallel_gc_threads;
duke@435 2088 } else {
duke@435 2089 // Over partition
duke@435 2090 tasks_for_dense_prefix = parallel_gc_threads *
duke@435 2091 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
duke@435 2092 }
duke@435 2093 }
jcoomes@810 2094 size_t regions_per_thread = total_dense_prefix_regions /
duke@435 2095 tasks_for_dense_prefix;
jcoomes@810 2096 // Give each thread at least 1 region.
jcoomes@810 2097 if (regions_per_thread == 0) {
jcoomes@810 2098 regions_per_thread = 1;
duke@435 2099 }
duke@435 2100
duke@435 2101 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
jcoomes@810 2102 if (region_index_start >= region_index_end_dense_prefix) {
duke@435 2103 break;
duke@435 2104 }
jcoomes@810 2105 // region_index_end is not processed
jcoomes@810 2106 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
jcoomes@810 2107 region_index_end_dense_prefix);
duke@435 2108 q->enqueue(new UpdateDensePrefixTask(
duke@435 2109 space_id,
jcoomes@810 2110 region_index_start,
jcoomes@810 2111 region_index_end));
jcoomes@810 2112 region_index_start = region_index_end;
duke@435 2113 }
duke@435 2114 }
duke@435 2115 // This gets any part of the dense prefix that did not
duke@435 2116 // fit evenly.
jcoomes@810 2117 if (region_index_start < region_index_end_dense_prefix) {
duke@435 2118 q->enqueue(new UpdateDensePrefixTask(
duke@435 2119 space_id,
jcoomes@810 2120 region_index_start,
jcoomes@810 2121 region_index_end_dense_prefix));
duke@435 2122 }
duke@435 2123 space_id = next_compaction_space_id(space_id);
duke@435 2124 } // End tasks for dense prefix
duke@435 2125 }
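
// A worked example of the partitioning above, with assumed values: with 4 GC
// threads, UseParallelDensePrefixUpdate enabled and 100 dense prefix regions, the
// threshold 4 * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING = 16 is exceeded, so
// tasks_for_dense_prefix = 16 and regions_per_thread = 100 / 16 = 6.  Sixteen
// UpdateDensePrefixTasks of 6 regions each cover the first 96 regions of the
// prefix, and the final "did not fit evenly" task covers the remaining 4.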
duke@435 2126
jcoomes@810 2127 void PSParallelCompact::enqueue_region_stealing_tasks(
duke@435 2128 GCTaskQueue* q,
duke@435 2129 ParallelTaskTerminator* terminator_ptr,
duke@435 2130 uint parallel_gc_threads) {
duke@435 2131 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
duke@435 2132
jcoomes@810 2133   // Once a thread has drained its stack, it should try to steal regions from
duke@435 2134 // other threads.
duke@435 2135 if (parallel_gc_threads > 1) {
duke@435 2136 for (uint j = 0; j < parallel_gc_threads; j++) {
jcoomes@810 2137 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
duke@435 2138 }
duke@435 2139 }
duke@435 2140 }
duke@435 2141
duke@435 2142 void PSParallelCompact::compact() {
duke@435 2143 EventMark m("5 compact");
duke@435 2144 // trace("5");
duke@435 2145 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
duke@435 2146
duke@435 2147 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2148 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2149 PSOldGen* old_gen = heap->old_gen();
duke@435 2150 old_gen->start_array()->reset();
duke@435 2151 uint parallel_gc_threads = heap->gc_task_manager()->workers();
jcoomes@810 2152 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
duke@435 2153 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2154
duke@435 2155 GCTaskQueue* q = GCTaskQueue::create();
jcoomes@810 2156 enqueue_region_draining_tasks(q, parallel_gc_threads);
duke@435 2157 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
jcoomes@810 2158 enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
duke@435 2159
duke@435 2160 {
duke@435 2161 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
duke@435 2162
duke@435 2163 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2164 q->enqueue(fin);
duke@435 2165
duke@435 2166 gc_task_manager()->add_list(q);
duke@435 2167
duke@435 2168 fin->wait_for();
duke@435 2169
duke@435 2170 // We have to release the barrier tasks!
duke@435 2171 WaitForBarrierGCTask::destroy(fin);
duke@435 2172
duke@435 2173 #ifdef ASSERT
jcoomes@810 2174 // Verify that all regions have been processed before the deferred updates.
duke@435 2175 // Note that perm_space_id is skipped; this type of verification is not
jcoomes@810 2176 // valid until the perm gen is compacted by regions.
duke@435 2177 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2178 verify_complete(SpaceId(id));
duke@435 2179 }
duke@435 2180 #endif
duke@435 2181 }
duke@435 2182
duke@435 2183 {
duke@435 2184 // Update the deferred objects, if any. Any compaction manager can be used.
duke@435 2185 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
duke@435 2186 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
duke@435 2187 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2188 update_deferred_objects(cm, SpaceId(id));
duke@435 2189 }
duke@435 2190 }
duke@435 2191 }
duke@435 2192
duke@435 2193 #ifdef ASSERT
duke@435 2194 void PSParallelCompact::verify_complete(SpaceId space_id) {
jcoomes@810 2195 // All Regions between space bottom() to new_top() should be marked as filled
jcoomes@810 2196 // and all Regions between new_top() and top() should be available (i.e.,
duke@435 2197 // should have been emptied).
duke@435 2198 ParallelCompactData& sd = summary_data();
duke@435 2199 SpaceInfo si = _space_info[space_id];
jcoomes@810 2200 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
jcoomes@810 2201 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
jcoomes@810 2202 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
jcoomes@810 2203 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
jcoomes@810 2204 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
duke@435 2205
duke@435 2206 bool issued_a_warning = false;
duke@435 2207
jcoomes@810 2208 size_t cur_region;
jcoomes@810 2209 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
jcoomes@810 2210 const RegionData* const c = sd.region(cur_region);
duke@435 2211 if (!c->completed()) {
jcoomes@810 2212 warning("region " SIZE_FORMAT " not filled: "
duke@435 2213 "destination_count=" SIZE_FORMAT,
jcoomes@810 2214 cur_region, c->destination_count());
duke@435 2215 issued_a_warning = true;
duke@435 2216 }
duke@435 2217 }
duke@435 2218
jcoomes@810 2219 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
jcoomes@810 2220 const RegionData* const c = sd.region(cur_region);
duke@435 2221 if (!c->available()) {
jcoomes@810 2222 warning("region " SIZE_FORMAT " not empty: "
duke@435 2223 "destination_count=" SIZE_FORMAT,
jcoomes@810 2224 cur_region, c->destination_count());
duke@435 2225 issued_a_warning = true;
duke@435 2226 }
duke@435 2227 }
duke@435 2228
duke@435 2229 if (issued_a_warning) {
jcoomes@810 2230 print_region_ranges();
duke@435 2231 }
duke@435 2232 }
duke@435 2233 #endif // #ifdef ASSERT
duke@435 2234
duke@435 2235 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
duke@435 2236 EventMark m("5 compact serial");
duke@435 2237 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
duke@435 2238
duke@435 2239 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2240 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2241
duke@435 2242 PSYoungGen* young_gen = heap->young_gen();
duke@435 2243 PSOldGen* old_gen = heap->old_gen();
duke@435 2244
duke@435 2245 old_gen->start_array()->reset();
duke@435 2246 old_gen->move_and_update(cm);
duke@435 2247 young_gen->move_and_update(cm);
duke@435 2248 }
duke@435 2249
duke@435 2250
duke@435 2251 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
duke@435 2252 while(!cm->overflow_stack()->is_empty()) {
duke@435 2253 oop obj = cm->overflow_stack()->pop();
duke@435 2254 obj->follow_contents(cm);
duke@435 2255 }
duke@435 2256
duke@435 2257 oop obj;
duke@435 2258 // obj is a reference!!!
duke@435 2259 while (cm->marking_stack()->pop_local(obj)) {
duke@435 2260 // It would be nice to assert about the type of objects we might
duke@435 2261 // pop, but they can come from anywhere, unfortunately.
duke@435 2262 obj->follow_contents(cm);
duke@435 2263 }
duke@435 2264 }
duke@435 2265
duke@435 2266 void
duke@435 2267 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
duke@435 2268 // All klasses on the revisit stack are marked at this point.
duke@435 2269 // Update and follow all subklass, sibling and implementor links.
duke@435 2270 for (uint i = 0; i < ParallelGCThreads+1; i++) {
duke@435 2271 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
duke@435 2272 KeepAliveClosure keep_alive_closure(cm);
duke@435 2273     for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
duke@435 2274       cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
duke@435 2275 is_alive_closure(),
duke@435 2276 &keep_alive_closure);
duke@435 2277 }
duke@435 2278 follow_stack(cm);
duke@435 2279 }
duke@435 2280 }
duke@435 2281
duke@435 2282 void
duke@435 2283 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
duke@435 2284 cm->revisit_klass_stack()->push(k);
duke@435 2285 }
duke@435 2286
duke@435 2287 #ifdef VALIDATE_MARK_SWEEP
duke@435 2288
coleenp@548 2289 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
duke@435 2290 if (!ValidateMarkSweep)
duke@435 2291 return;
duke@435 2292
duke@435 2293 if (!isroot) {
duke@435 2294 if (_pointer_tracking) {
duke@435 2295 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
duke@435 2296 _adjusted_pointers->remove(p);
duke@435 2297 }
duke@435 2298 } else {
duke@435 2299 ptrdiff_t index = _root_refs_stack->find(p);
duke@435 2300 if (index != -1) {
duke@435 2301 int l = _root_refs_stack->length();
duke@435 2302 if (l > 0 && l - 1 != index) {
coleenp@548 2303 void* last = _root_refs_stack->pop();
duke@435 2304 assert(last != p, "should be different");
duke@435 2305 _root_refs_stack->at_put(index, last);
duke@435 2306 } else {
duke@435 2307 _root_refs_stack->remove(p);
duke@435 2308 }
duke@435 2309 }
duke@435 2310 }
duke@435 2311 }
duke@435 2312
duke@435 2313
coleenp@548 2314 void PSParallelCompact::check_adjust_pointer(void* p) {
duke@435 2315 _adjusted_pointers->push(p);
duke@435 2316 }
duke@435 2317
duke@435 2318
duke@435 2319 class AdjusterTracker: public OopClosure {
duke@435 2320 public:
duke@435 2321   AdjusterTracker() { }
coleenp@548 2322 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
coleenp@548 2323 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
duke@435 2324 };
duke@435 2325
duke@435 2326
duke@435 2327 void PSParallelCompact::track_interior_pointers(oop obj) {
duke@435 2328 if (ValidateMarkSweep) {
duke@435 2329 _adjusted_pointers->clear();
duke@435 2330 _pointer_tracking = true;
duke@435 2331
duke@435 2332 AdjusterTracker checker;
duke@435 2333 obj->oop_iterate(&checker);
duke@435 2334 }
duke@435 2335 }
duke@435 2336
duke@435 2337
duke@435 2338 void PSParallelCompact::check_interior_pointers() {
duke@435 2339 if (ValidateMarkSweep) {
duke@435 2340 _pointer_tracking = false;
duke@435 2341 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
duke@435 2342 }
duke@435 2343 }
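
// Illustrative sketch (added for clarity; not part of the original source):
// when ValidateMarkSweep is enabled, the hooks above are intended to bracket
// the pointer adjustment of a single object, roughly:
//
//   track_interior_pointers(obj);      // record every interior oop location in obj
//   ... adjust each interior oop ...   // each adjustment calls
//                                      // track_adjusted_pointer(p, /*isroot=*/ false)
//   check_interior_pointers();         // verify every recorded location was visited
//
// The real call sites live in the adjust-pointer phase and may differ in detail.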
duke@435 2344
duke@435 2345
duke@435 2346 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
duke@435 2347 if (ValidateMarkSweep) {
duke@435 2348 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
duke@435 2349 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
duke@435 2350 }
duke@435 2351 }
duke@435 2352
duke@435 2353
duke@435 2354 void PSParallelCompact::register_live_oop(oop p, size_t size) {
duke@435 2355 if (ValidateMarkSweep) {
duke@435 2356 _live_oops->push(p);
duke@435 2357 _live_oops_size->push(size);
duke@435 2358 _live_oops_index++;
duke@435 2359 }
duke@435 2360 }
duke@435 2361
duke@435 2362 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
duke@435 2363 if (ValidateMarkSweep) {
duke@435 2364 oop obj = _live_oops->at((int)_live_oops_index);
duke@435 2365 guarantee(obj == p, "should be the same object");
duke@435 2366 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
duke@435 2367 _live_oops_index++;
duke@435 2368 }
duke@435 2369 }
duke@435 2370
duke@435 2371 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
duke@435 2372 HeapWord* compaction_top) {
duke@435 2373 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
duke@435 2374 "should be moved to forwarded location");
duke@435 2375 if (ValidateMarkSweep) {
duke@435 2376 PSParallelCompact::validate_live_oop(oop(q), size);
duke@435 2377 _live_oops_moved_to->push(oop(compaction_top));
duke@435 2378 }
duke@435 2379 if (RecordMarkSweepCompaction) {
duke@435 2380 _cur_gc_live_oops->push(q);
duke@435 2381 _cur_gc_live_oops_moved_to->push(compaction_top);
duke@435 2382 _cur_gc_live_oops_size->push(size);
duke@435 2383 }
duke@435 2384 }
duke@435 2385
duke@435 2386
duke@435 2387 void PSParallelCompact::compaction_complete() {
duke@435 2388 if (RecordMarkSweepCompaction) {
duke@435 2389     GrowableArray<HeapWord*>* tmp_live_oops = _cur_gc_live_oops;
duke@435 2390     GrowableArray<HeapWord*>* tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
duke@435 2391     GrowableArray<size_t> * tmp_live_oops_size = _cur_gc_live_oops_size;
duke@435 2392
duke@435 2393 _cur_gc_live_oops = _last_gc_live_oops;
duke@435 2394 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
duke@435 2395 _cur_gc_live_oops_size = _last_gc_live_oops_size;
duke@435 2396     _last_gc_live_oops = tmp_live_oops;
duke@435 2397     _last_gc_live_oops_moved_to = tmp_live_oops_moved_to;
duke@435 2398     _last_gc_live_oops_size = tmp_live_oops_size;
duke@435 2399 }
duke@435 2400 }
duke@435 2401
duke@435 2402
duke@435 2403 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
duke@435 2404 if (!RecordMarkSweepCompaction) {
duke@435 2405 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
duke@435 2406 return;
duke@435 2407 }
duke@435 2408
duke@435 2409 if (_last_gc_live_oops == NULL) {
duke@435 2410 tty->print_cr("No compaction information gathered yet");
duke@435 2411 return;
duke@435 2412 }
duke@435 2413
duke@435 2414 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
duke@435 2415 HeapWord* old_oop = _last_gc_live_oops->at(i);
duke@435 2416 size_t sz = _last_gc_live_oops_size->at(i);
duke@435 2417 if (old_oop <= q && q < (old_oop + sz)) {
duke@435 2418 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
duke@435 2419 size_t offset = (q - old_oop);
duke@435 2420 tty->print_cr("Address " PTR_FORMAT, q);
duke@435 2421       tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
duke@435 2422 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
duke@435 2423 return;
duke@435 2424 }
duke@435 2425 }
duke@435 2426
duke@435 2427 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
duke@435 2428 }
duke@435 2429 #endif //VALIDATE_MARK_SWEEP
duke@435 2430
jcoomes@810 2431 // Update interior oops in the range of regions [beg_region, end_region).
duke@435 2432 void
duke@435 2433 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 2434 SpaceId space_id,
jcoomes@810 2435 size_t beg_region,
jcoomes@810 2436 size_t end_region) {
duke@435 2437 ParallelCompactData& sd = summary_data();
duke@435 2438 ParMarkBitMap* const mbm = mark_bitmap();
duke@435 2439
jcoomes@810 2440 HeapWord* beg_addr = sd.region_to_addr(beg_region);
jcoomes@810 2441 HeapWord* const end_addr = sd.region_to_addr(end_region);
jcoomes@810 2442 assert(beg_region <= end_region, "bad region range");
duke@435 2443 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
duke@435 2444
duke@435 2445 #ifdef ASSERT
jcoomes@810 2446 // Claim the regions to avoid triggering an assert when they are marked as
duke@435 2447 // filled.
jcoomes@810 2448 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
jcoomes@810 2449 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
duke@435 2450 }
duke@435 2451 #endif // #ifdef ASSERT
duke@435 2452
duke@435 2453 if (beg_addr != space(space_id)->bottom()) {
duke@435 2454 // Find the first live object or block of dead space that *starts* in this
jcoomes@810 2455 // range of regions. If a partial object crosses onto the region, skip it;
jcoomes@810 2456 // it will be marked for 'deferred update' when the object head is
jcoomes@810 2457 // processed. If dead space crosses onto the region, it is also skipped; it
jcoomes@810 2458 // will be filled when the prior region is processed. If neither of those
jcoomes@810 2459     // applies, the first word in the region is the start of a live object or dead
jcoomes@810 2460 // space.
duke@435 2461 assert(beg_addr > space(space_id)->bottom(), "sanity");
jcoomes@810 2462 const RegionData* const cp = sd.region(beg_region);
duke@435 2463 if (cp->partial_obj_size() != 0) {
jcoomes@810 2464 beg_addr = sd.partial_obj_end(beg_region);
duke@435 2465 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
duke@435 2466 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
duke@435 2467 }
duke@435 2468 }
duke@435 2469
duke@435 2470 if (beg_addr < end_addr) {
jcoomes@810 2471     // A live object or block of dead space starts in this range of regions.
duke@435 2472 HeapWord* const dense_prefix_end = dense_prefix(space_id);
duke@435 2473
duke@435 2474 // Create closures and iterate.
duke@435 2475 UpdateOnlyClosure update_closure(mbm, cm, space_id);
duke@435 2476 FillClosure fill_closure(cm, space_id);
duke@435 2477 ParMarkBitMap::IterationStatus status;
duke@435 2478 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
duke@435 2479 dense_prefix_end);
duke@435 2480 if (status == ParMarkBitMap::incomplete) {
duke@435 2481 update_closure.do_addr(update_closure.source());
duke@435 2482 }
duke@435 2483 }
duke@435 2484
jcoomes@810 2485 // Mark the regions as filled.
jcoomes@810 2486 RegionData* const beg_cp = sd.region(beg_region);
jcoomes@810 2487 RegionData* const end_cp = sd.region(end_region);
jcoomes@810 2488 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
duke@435 2489 cp->set_completed();
duke@435 2490 }
duke@435 2491 }
duke@435 2492
duke@435 2493 // Return the SpaceId for the space containing addr. If addr is not in the
duke@435 2494 // heap, last_space_id is returned. In debug mode it expects the address to be
duke@435 2495 // in the heap and asserts such.
duke@435 2496 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
duke@435 2497 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
duke@435 2498
duke@435 2499 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 2500 if (_space_info[id].space()->contains(addr)) {
duke@435 2501 return SpaceId(id);
duke@435 2502 }
duke@435 2503 }
duke@435 2504
duke@435 2505 assert(false, "no space contains the addr");
duke@435 2506 return last_space_id;
duke@435 2507 }
duke@435 2508
duke@435 2509 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
duke@435 2510 SpaceId id) {
duke@435 2511 assert(id < last_space_id, "bad space id");
duke@435 2512
duke@435 2513 ParallelCompactData& sd = summary_data();
duke@435 2514 const SpaceInfo* const space_info = _space_info + id;
duke@435 2515 ObjectStartArray* const start_array = space_info->start_array();
duke@435 2516
duke@435 2517 const MutableSpace* const space = space_info->space();
duke@435 2518 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
duke@435 2519 HeapWord* const beg_addr = space_info->dense_prefix();
jcoomes@810 2520 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
jcoomes@810 2521
jcoomes@810 2522 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
jcoomes@810 2523 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
jcoomes@810 2524 const RegionData* cur_region;
jcoomes@810 2525 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
jcoomes@810 2526 HeapWord* const addr = cur_region->deferred_obj_addr();
duke@435 2527 if (addr != NULL) {
duke@435 2528 if (start_array != NULL) {
duke@435 2529 start_array->allocate_block(addr);
duke@435 2530 }
duke@435 2531 oop(addr)->update_contents(cm);
duke@435 2532 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
duke@435 2533 }
duke@435 2534 }
duke@435 2535 }
duke@435 2536
duke@435 2537 // Skip over count live words starting from beg, and return the address of the
duke@435 2538 // next live word. Unless marked, the word corresponding to beg is assumed to
duke@435 2539 // be dead. Callers must either ensure beg does not correspond to the middle of
duke@435 2540 // an object, or account for those live words in some other way. Callers must
duke@435 2541 // also ensure that there are enough live words in the range [beg, end) to skip.
duke@435 2542 HeapWord*
duke@435 2543 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
duke@435 2544 {
duke@435 2545 assert(count > 0, "sanity");
duke@435 2546
duke@435 2547 ParMarkBitMap* m = mark_bitmap();
duke@435 2548 idx_t bits_to_skip = m->words_to_bits(count);
duke@435 2549 idx_t cur_beg = m->addr_to_bit(beg);
duke@435 2550 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
duke@435 2551
duke@435 2552 do {
duke@435 2553 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 2554 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
duke@435 2555 const size_t obj_bits = cur_end - cur_beg + 1;
duke@435 2556 if (obj_bits > bits_to_skip) {
duke@435 2557 return m->bit_to_addr(cur_beg + bits_to_skip);
duke@435 2558 }
duke@435 2559 bits_to_skip -= obj_bits;
duke@435 2560 cur_beg = cur_end + 1;
duke@435 2561 } while (bits_to_skip > 0);
duke@435 2562
duke@435 2563 // Skipping the desired number of words landed just past the end of an object.
duke@435 2564 // Find the start of the next object.
duke@435 2565 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 2566 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
duke@435 2567 return m->bit_to_addr(cur_beg);
duke@435 2568 }
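
// Usage sketch (added for clarity; not part of the original source):
// first_src_addr() below uses skip_live_words() to translate a destination
// offset back into a source address.  If the live data of a source region
// begins at addr and its first words_to_skip live words were already claimed
// by earlier destination regions, the first word still to be copied is
//
//   HeapWord* first = skip_live_words(addr, src_region_end, words_to_skip);
//
// assuming the mark bitmap covers [addr, src_region_end) and at least
// words_to_skip live words remain in that range.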
duke@435 2569
duke@435 2570 HeapWord*
duke@435 2571 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
jcoomes@810 2572 size_t src_region_idx)
duke@435 2573 {
duke@435 2574 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2575 const ParallelCompactData& sd = summary_data();
jcoomes@810 2576 const size_t RegionSize = ParallelCompactData::RegionSize;
jcoomes@810 2577
jcoomes@810 2578 assert(sd.is_region_aligned(dest_addr), "not aligned");
jcoomes@810 2579
jcoomes@810 2580 const RegionData* const src_region_ptr = sd.region(src_region_idx);
jcoomes@810 2581 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
jcoomes@810 2582 HeapWord* const src_region_destination = src_region_ptr->destination();
jcoomes@810 2583
jcoomes@810 2584 assert(dest_addr >= src_region_destination, "wrong src region");
jcoomes@810 2585 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
jcoomes@810 2586
jcoomes@810 2587 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
jcoomes@810 2588 HeapWord* const src_region_end = src_region_beg + RegionSize;
jcoomes@810 2589
jcoomes@810 2590 HeapWord* addr = src_region_beg;
jcoomes@810 2591 if (dest_addr == src_region_destination) {
jcoomes@810 2592 // Return the first live word in the source region.
duke@435 2593 if (partial_obj_size == 0) {
jcoomes@810 2594 addr = bitmap->find_obj_beg(addr, src_region_end);
jcoomes@810 2595 assert(addr < src_region_end, "no objects start in src region");
duke@435 2596 }
duke@435 2597 return addr;
duke@435 2598 }
duke@435 2599
duke@435 2600 // Must skip some live data.
jcoomes@810 2601 size_t words_to_skip = dest_addr - src_region_destination;
jcoomes@810 2602 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
duke@435 2603
duke@435 2604 if (partial_obj_size >= words_to_skip) {
duke@435 2605 // All the live words to skip are part of the partial object.
duke@435 2606 addr += words_to_skip;
duke@435 2607 if (partial_obj_size == words_to_skip) {
duke@435 2608 // Find the first live word past the partial object.
jcoomes@810 2609 addr = bitmap->find_obj_beg(addr, src_region_end);
jcoomes@810 2610 assert(addr < src_region_end, "wrong src region");
duke@435 2611 }
duke@435 2612 return addr;
duke@435 2613 }
duke@435 2614
duke@435 2615 // Skip over the partial object (if any).
duke@435 2616 if (partial_obj_size != 0) {
duke@435 2617 words_to_skip -= partial_obj_size;
duke@435 2618 addr += partial_obj_size;
duke@435 2619 }
duke@435 2620
jcoomes@810 2621 // Skip over live words due to objects that start in the region.
jcoomes@810 2622 addr = skip_live_words(addr, src_region_end, words_to_skip);
jcoomes@810 2623 assert(addr < src_region_end, "wrong src region");
duke@435 2624 return addr;
duke@435 2625 }
duke@435 2626
duke@435 2627 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
jcoomes@810 2628 size_t beg_region,
duke@435 2629 HeapWord* end_addr)
duke@435 2630 {
duke@435 2631 ParallelCompactData& sd = summary_data();
jcoomes@810 2632 RegionData* const beg = sd.region(beg_region);
jcoomes@810 2633 HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
jcoomes@810 2634 RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
jcoomes@810 2635 size_t cur_idx = beg_region;
jcoomes@810 2636 for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
jcoomes@810 2637 assert(cur->data_size() > 0, "region must have live data");
duke@435 2638 cur->decrement_destination_count();
jcoomes@810 2639 if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
duke@435 2640 cm->save_for_processing(cur_idx);
duke@435 2641 }
duke@435 2642 }
duke@435 2643 }
duke@435 2644
jcoomes@810 2645 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
jcoomes@810 2646 SpaceId& src_space_id,
jcoomes@810 2647 HeapWord*& src_space_top,
jcoomes@810 2648 HeapWord* end_addr)
duke@435 2649 {
jcoomes@810 2650 typedef ParallelCompactData::RegionData RegionData;
duke@435 2651
duke@435 2652 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 2653 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@810 2654
jcoomes@810 2655 size_t src_region_idx = 0;
jcoomes@810 2656
jcoomes@810 2657 // Skip empty regions (if any) up to the top of the space.
jcoomes@810 2658 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
jcoomes@810 2659 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
jcoomes@810 2660 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
jcoomes@810 2661 const RegionData* const top_region_ptr =
jcoomes@810 2662 sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 2663 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
jcoomes@810 2664 ++src_region_ptr;
duke@435 2665 }
duke@435 2666
jcoomes@810 2667 if (src_region_ptr < top_region_ptr) {
jcoomes@810 2668 // The next source region is in the current space. Update src_region_idx
jcoomes@810 2669 // and the source address to match src_region_ptr.
jcoomes@810 2670 src_region_idx = sd.region(src_region_ptr);
jcoomes@810 2671 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
jcoomes@810 2672 if (src_region_addr > closure.source()) {
jcoomes@810 2673 closure.set_source(src_region_addr);
duke@435 2674 }
jcoomes@810 2675 return src_region_idx;
duke@435 2676 }
duke@435 2677
jcoomes@810 2678 // Switch to a new source space and find the first non-empty region.
duke@435 2679 unsigned int space_id = src_space_id + 1;
duke@435 2680 assert(space_id < last_space_id, "not enough spaces");
duke@435 2681
duke@435 2682 HeapWord* const destination = closure.destination();
duke@435 2683
duke@435 2684 do {
duke@435 2685 MutableSpace* space = _space_info[space_id].space();
duke@435 2686 HeapWord* const bottom = space->bottom();
jcoomes@810 2687 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
duke@435 2688
duke@435 2689 // Iterate over the spaces that do not compact into themselves.
duke@435 2690 if (bottom_cp->destination() != bottom) {
jcoomes@810 2691 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
jcoomes@810 2692 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 2693
jcoomes@810 2694 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
duke@435 2695 if (src_cp->live_obj_size() > 0) {
duke@435 2696 // Found it.
duke@435 2697 assert(src_cp->destination() == destination,
duke@435 2698 "first live obj in the space must match the destination");
duke@435 2699 assert(src_cp->partial_obj_size() == 0,
duke@435 2700 "a space cannot begin with a partial obj");
duke@435 2701
duke@435 2702 src_space_id = SpaceId(space_id);
duke@435 2703 src_space_top = space->top();
jcoomes@810 2704 const size_t src_region_idx = sd.region(src_cp);
jcoomes@810 2705 closure.set_source(sd.region_to_addr(src_region_idx));
jcoomes@810 2706 return src_region_idx;
duke@435 2707 } else {
duke@435 2708 assert(src_cp->data_size() == 0, "sanity");
duke@435 2709 }
duke@435 2710 }
duke@435 2711 }
duke@435 2712 } while (++space_id < last_space_id);
duke@435 2713
jcoomes@810 2714 assert(false, "no source region was found");
duke@435 2715 return 0;
duke@435 2716 }
duke@435 2717
jcoomes@810 2718 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
duke@435 2719 {
duke@435 2720 typedef ParMarkBitMap::IterationStatus IterationStatus;
jcoomes@810 2721 const size_t RegionSize = ParallelCompactData::RegionSize;
duke@435 2722 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2723 ParallelCompactData& sd = summary_data();
jcoomes@810 2724 RegionData* const region_ptr = sd.region(region_idx);
duke@435 2725
duke@435 2726 // Get the items needed to construct the closure.
jcoomes@810 2727 HeapWord* dest_addr = sd.region_to_addr(region_idx);
duke@435 2728 SpaceId dest_space_id = space_id(dest_addr);
duke@435 2729 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
duke@435 2730 HeapWord* new_top = _space_info[dest_space_id].new_top();
duke@435 2731 assert(dest_addr < new_top, "sanity");
jcoomes@810 2732 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
jcoomes@810 2733
jcoomes@810 2734 // Get the source region and related info.
jcoomes@810 2735 size_t src_region_idx = region_ptr->source_region();
jcoomes@810 2736 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
duke@435 2737 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
duke@435 2738
duke@435 2739 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
jcoomes@810 2740 closure.set_source(first_src_addr(dest_addr, src_region_idx));
jcoomes@810 2741
jcoomes@810 2742 // Adjust src_region_idx to prepare for decrementing destination counts (the
jcoomes@810 2743 // destination count is not decremented when a region is copied to itself).
jcoomes@810 2744 if (src_region_idx == region_idx) {
jcoomes@810 2745 src_region_idx += 1;
duke@435 2746 }
duke@435 2747
duke@435 2748 if (bitmap->is_unmarked(closure.source())) {
duke@435 2749 // The first source word is in the middle of an object; copy the remainder
duke@435 2750 // of the object or as much as will fit. The fact that pointer updates were
duke@435 2751 // deferred will be noted when the object header is processed.
duke@435 2752 HeapWord* const old_src_addr = closure.source();
duke@435 2753 closure.copy_partial_obj();
duke@435 2754 if (closure.is_full()) {
jcoomes@810 2755 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2756 region_ptr->set_deferred_obj_addr(NULL);
jcoomes@810 2757 region_ptr->set_completed();
duke@435 2758 return;
duke@435 2759 }
duke@435 2760
jcoomes@810 2761 HeapWord* const end_addr = sd.region_align_down(closure.source());
jcoomes@810 2762 if (sd.region_align_down(old_src_addr) != end_addr) {
jcoomes@810 2763 // The partial object was copied from more than one source region.
jcoomes@810 2764 decrement_destination_counts(cm, src_region_idx, end_addr);
jcoomes@810 2765
jcoomes@810 2766 // Move to the next source region, possibly switching spaces as well. All
duke@435 2767 // args except end_addr may be modified.
jcoomes@810 2768 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
jcoomes@810 2769 end_addr);
duke@435 2770 }
duke@435 2771 }
duke@435 2772
duke@435 2773 do {
duke@435 2774 HeapWord* const cur_addr = closure.source();
jcoomes@810 2775 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
duke@435 2776 src_space_top);
duke@435 2777 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
duke@435 2778
duke@435 2779 if (status == ParMarkBitMap::incomplete) {
jcoomes@810 2780 // The last obj that starts in the source region does not end in the
jcoomes@810 2781 // region.
duke@435 2782       assert(closure.source() < end_addr, "sanity");
duke@435 2783 HeapWord* const obj_beg = closure.source();
duke@435 2784 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
duke@435 2785 src_space_top);
duke@435 2786 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
duke@435 2787 if (obj_end < range_end) {
duke@435 2788 // The end was found; the entire object will fit.
duke@435 2789 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
duke@435 2790 assert(status != ParMarkBitMap::would_overflow, "sanity");
duke@435 2791 } else {
duke@435 2792 // The end was not found; the object will not fit.
duke@435 2793 assert(range_end < src_space_top, "obj cannot cross space boundary");
duke@435 2794 status = ParMarkBitMap::would_overflow;
duke@435 2795 }
duke@435 2796 }
duke@435 2797
duke@435 2798 if (status == ParMarkBitMap::would_overflow) {
duke@435 2799       // The last object did not fit. Record that its interior oop updates are
jcoomes@810 2800       // deferred, then copy enough of the object to fill the region.
jcoomes@810 2801 region_ptr->set_deferred_obj_addr(closure.destination());
duke@435 2802 status = closure.copy_until_full(); // copies from closure.source()
duke@435 2803
jcoomes@810 2804 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2805 region_ptr->set_completed();
duke@435 2806 return;
duke@435 2807 }
duke@435 2808
duke@435 2809 if (status == ParMarkBitMap::full) {
jcoomes@810 2810 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2811 region_ptr->set_deferred_obj_addr(NULL);
jcoomes@810 2812 region_ptr->set_completed();
duke@435 2813 return;
duke@435 2814 }
duke@435 2815
jcoomes@810 2816 decrement_destination_counts(cm, src_region_idx, end_addr);
jcoomes@810 2817
jcoomes@810 2818 // Move to the next source region, possibly switching spaces as well. All
duke@435 2819 // args except end_addr may be modified.
jcoomes@810 2820 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
jcoomes@810 2821 end_addr);
duke@435 2822 } while (true);
duke@435 2823 }
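
// Informal summary (added for clarity; not part of the original source) of the
// fill_region() loop above.  Each pass copies live words from the current
// source region into the destination region and then dispatches on the
// iteration status:
//   full           -> the destination region is full; decrement destination
//                     counts and mark the region completed.
//   would_overflow -> the next object does not fit; record its new location for
//                     a deferred update, copy what fits, and mark completed.
//   otherwise      -> the source region is exhausted; decrement destination
//                     counts up to end_addr and advance to the next non-empty
//                     source region, possibly switching spaces.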
duke@435 2824
duke@435 2825 void
duke@435 2826 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
duke@435 2827 const MutableSpace* sp = space(space_id);
duke@435 2828 if (sp->is_empty()) {
duke@435 2829 return;
duke@435 2830 }
duke@435 2831
duke@435 2832 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2833 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2834 HeapWord* const dp_addr = dense_prefix(space_id);
duke@435 2835 HeapWord* beg_addr = sp->bottom();
duke@435 2836 HeapWord* end_addr = sp->top();
duke@435 2837
duke@435 2838 #ifdef ASSERT
duke@435 2839 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
duke@435 2840 if (cm->should_verify_only()) {
duke@435 2841 VerifyUpdateClosure verify_update(cm, sp);
duke@435 2842 bitmap->iterate(&verify_update, beg_addr, end_addr);
duke@435 2843 return;
duke@435 2844 }
duke@435 2845
duke@435 2846 if (cm->should_reset_only()) {
duke@435 2847 ResetObjectsClosure reset_objects(cm);
duke@435 2848 bitmap->iterate(&reset_objects, beg_addr, end_addr);
duke@435 2849 return;
duke@435 2850 }
duke@435 2851 #endif
duke@435 2852
jcoomes@810 2853 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
jcoomes@810 2854 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
jcoomes@810 2855 if (beg_region < dp_region) {
jcoomes@810 2856 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
duke@435 2857 }
duke@435 2858
jcoomes@810 2859 // The destination of the first live object that starts in the region is one
jcoomes@810 2860 // past the end of the partial object entering the region (if any).
jcoomes@810 2861 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
duke@435 2862 HeapWord* const new_top = _space_info[space_id].new_top();
duke@435 2863 assert(new_top >= dest_addr, "bad new_top value");
duke@435 2864 const size_t words = pointer_delta(new_top, dest_addr);
duke@435 2865
duke@435 2866 if (words > 0) {
duke@435 2867 ObjectStartArray* start_array = _space_info[space_id].start_array();
duke@435 2868 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 2869
duke@435 2870 ParMarkBitMap::IterationStatus status;
duke@435 2871 status = bitmap->iterate(&closure, dest_addr, end_addr);
duke@435 2872 assert(status == ParMarkBitMap::full, "iteration not complete");
duke@435 2873 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
duke@435 2874 "live objects skipped because closure is full");
duke@435 2875 }
duke@435 2876 }
duke@435 2877
duke@435 2878 jlong PSParallelCompact::millis_since_last_gc() {
duke@435 2879 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
duke@435 2880 // XXX See note in genCollectedHeap::millis_since_last_gc().
duke@435 2881 if (ret_val < 0) {
duke@435 2882     NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
duke@435 2883 return 0;
duke@435 2884 }
duke@435 2885 return ret_val;
duke@435 2886 }
duke@435 2887
duke@435 2888 void PSParallelCompact::reset_millis_since_last_gc() {
duke@435 2889 _time_of_last_gc = os::javaTimeMillis();
duke@435 2890 }
duke@435 2891
duke@435 2892 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
duke@435 2893 {
duke@435 2894 if (source() != destination()) {
duke@435 2895 assert(source() > destination(), "must copy to the left");
duke@435 2896 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
duke@435 2897 }
duke@435 2898 update_state(words_remaining());
duke@435 2899 assert(is_full(), "sanity");
duke@435 2900 return ParMarkBitMap::full;
duke@435 2901 }
duke@435 2902
duke@435 2903 void MoveAndUpdateClosure::copy_partial_obj()
duke@435 2904 {
duke@435 2905 size_t words = words_remaining();
duke@435 2906
duke@435 2907 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
duke@435 2908 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
duke@435 2909 if (end_addr < range_end) {
duke@435 2910 words = bitmap()->obj_size(source(), end_addr);
duke@435 2911 }
duke@435 2912
duke@435 2913 // This test is necessary; if omitted, the pointer updates to a partial object
duke@435 2914 // that crosses the dense prefix boundary could be overwritten.
duke@435 2915 if (source() != destination()) {
duke@435 2916 assert(source() > destination(), "must copy to the left");
duke@435 2917 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 2918 }
duke@435 2919 update_state(words);
duke@435 2920 }
duke@435 2921
duke@435 2922 ParMarkBitMapClosure::IterationStatus
duke@435 2923 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2924 assert(destination() != NULL, "sanity");
duke@435 2925 assert(bitmap()->obj_size(addr) == words, "bad size");
duke@435 2926
duke@435 2927 _source = addr;
duke@435 2928 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
duke@435 2929 destination(), "wrong destination");
duke@435 2930
duke@435 2931 if (words > words_remaining()) {
duke@435 2932 return ParMarkBitMap::would_overflow;
duke@435 2933 }
duke@435 2934
duke@435 2935 // The start_array must be updated even if the object is not moving.
duke@435 2936 if (_start_array != NULL) {
duke@435 2937 _start_array->allocate_block(destination());
duke@435 2938 }
duke@435 2939
duke@435 2940 if (destination() != source()) {
duke@435 2941 assert(destination() < source(), "must copy to the left");
duke@435 2942 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 2943 }
duke@435 2944
duke@435 2945 oop moved_oop = (oop) destination();
duke@435 2946 moved_oop->update_contents(compaction_manager());
duke@435 2947 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
duke@435 2948
duke@435 2949 update_state(words);
duke@435 2950 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
duke@435 2951 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
duke@435 2952 }
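
// Protocol sketch (added for clarity; not part of the original source):
// ParMarkBitMap::iterate() invokes do_addr(addr, words) above for each live
// object it finds.  The closure copies the object left to destination() and
// updates its interior oops; it returns would_overflow if the object exceeds
// words_remaining(), full once the destination is exactly filled, and
// incomplete otherwise.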
duke@435 2953
duke@435 2954 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 2955 ParCompactionManager* cm,
duke@435 2956 PSParallelCompact::SpaceId space_id) :
duke@435 2957 ParMarkBitMapClosure(mbm, cm),
duke@435 2958 _space_id(space_id),
duke@435 2959 _start_array(PSParallelCompact::start_array(space_id))
duke@435 2960 {
duke@435 2961 }
duke@435 2962
duke@435 2963 // Updates the references in the object to their new values.
duke@435 2964 ParMarkBitMapClosure::IterationStatus
duke@435 2965 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2966 do_addr(addr);
duke@435 2967 return ParMarkBitMap::incomplete;
duke@435 2968 }
duke@435 2969
duke@435 2970 // Verify the new location using the forwarding pointer
duke@435 2971 // from MarkSweep::mark_sweep_phase2(). Set the mark_word
duke@435 2972 // to the initial value.
duke@435 2973 ParMarkBitMapClosure::IterationStatus
duke@435 2974 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2975 // The second arg (words) is not used.
duke@435 2976 oop obj = (oop) addr;
duke@435 2977 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
duke@435 2978 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
duke@435 2979 if (forwarding_ptr == NULL) {
duke@435 2980 // The object is dead or not moving.
duke@435 2981 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
duke@435 2982 "Object liveness is wrong.");
duke@435 2983 return ParMarkBitMap::incomplete;
duke@435 2984 }
duke@435 2985 assert(UseParallelOldGCDensePrefix ||
duke@435 2986 (HeapMaximumCompactionInterval > 1) ||
duke@435 2987 (MarkSweepAlwaysCompactCount > 1) ||
duke@435 2988 (forwarding_ptr == new_pointer),
duke@435 2989 "Calculation of new location is incorrect");
duke@435 2990 return ParMarkBitMap::incomplete;
duke@435 2991 }
duke@435 2992
duke@435 2993 // Reset objects modified for debug checking.
duke@435 2994 ParMarkBitMapClosure::IterationStatus
duke@435 2995 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2996 // The second arg (words) is not used.
duke@435 2997 oop obj = (oop) addr;
duke@435 2998 obj->init_mark();
duke@435 2999 return ParMarkBitMap::incomplete;
duke@435 3000 }
duke@435 3001
duke@435 3002 // Prepare for compaction. This method is executed once
duke@435 3003 // (i.e., by a single thread) before compaction.
duke@435 3004 // Save the updated location of the intArrayKlassObj for
duke@435 3005 // filling holes in the dense prefix.
duke@435 3006 void PSParallelCompact::compact_prologue() {
duke@435 3007 _updated_int_array_klass_obj = (klassOop)
duke@435 3008 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
duke@435 3009 }
duke@435 3010
duke@435 3011 // The initial implementation of this method created a field
duke@435 3012 // _next_compaction_space_id in SpaceInfo and initialized
duke@435 3013 // that field in SpaceInfo::initialize_space_info(). That
duke@435 3014 // required that _next_compaction_space_id be declared a
duke@435 3015 // SpaceId in SpaceInfo and that would have required that
duke@435 3016 // either SpaceId be declared in a separate class or that
duke@435 3017 // it be declared in SpaceInfo. It didn't seem consistent
duke@435 3018 // to declare it in SpaceInfo (didn't really fit logically).
duke@435 3019 // Alternatively, defining a separate class to define SpaceId
duke@435 3020 // seemed excessive. This implementation is simple and localizes
duke@435 3021 // the knowledge.
duke@435 3022
duke@435 3023 PSParallelCompact::SpaceId
duke@435 3024 PSParallelCompact::next_compaction_space_id(SpaceId id) {
duke@435 3025 assert(id < last_space_id, "id out of range");
duke@435 3026 switch (id) {
duke@435 3027 case perm_space_id :
duke@435 3028 return last_space_id;
duke@435 3029 case old_space_id :
duke@435 3030 return eden_space_id;
duke@435 3031 case eden_space_id :
duke@435 3032 return from_space_id;
duke@435 3033 case from_space_id :
duke@435 3034 return to_space_id;
duke@435 3035 case to_space_id :
duke@435 3036 return last_space_id;
duke@435 3037 default:
duke@435 3038 assert(false, "Bad space id");
duke@435 3039 return last_space_id;
duke@435 3040 }
duke@435 3041 }
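
// Summary (added for clarity; not part of the original source): perm is
// compacted by itself, and the remaining spaces are visited in the order
// old_space_id -> eden_space_id -> from_space_id -> to_space_id, with
// last_space_id acting as the end-of-sequence marker.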
