src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

author:      jcoomes
date:        Thu, 11 Dec 2008 12:05:08 -0800
changeset:   916:7d7a7c599c17
parent:      892:27a80744a83b
child:       917:7c2386d67889
permissions: -rw-r--r--

6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr

duke@435 1 /*
xdono@631 2 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_psParallelCompact.cpp.incl"
duke@435 27
duke@435 28 #include <math.h>
duke@435 29
duke@435 30 // All sizes are in HeapWords.
jcoomes@810 31 const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
jcoomes@810 32 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
jcoomes@810 33 const size_t ParallelCompactData::RegionSizeBytes =
jcoomes@810 34 RegionSize << LogHeapWordSize;
jcoomes@810 35 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
jcoomes@810 36 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
jcoomes@810 37 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
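// Editor's note (illustration, not part of the original changeset): with
// Log2RegionSize == 9, each region covers 512 HeapWords (4096 bytes when
// LogHeapWordSize == 3).  For an address addr in the covered range, the
// header accessors are assumed to compute roughly
//   region index     = pointer_delta(addr, _region_start) >> Log2RegionSize
//   offset in region = pointer_delta(addr, _region_start) & RegionSizeOffsetMask
// while RegionAddrOffsetMask/RegionAddrMask do the analogous masking on byte
// offsets (offset within a region, and rounding down to a region boundary).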
duke@435 38
jcoomes@810 39 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 40 ParallelCompactData::RegionData::dc_shift = 27;
jcoomes@810 41
jcoomes@810 42 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 43 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
jcoomes@810 44
jcoomes@810 45 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 46 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
jcoomes@810 47
jcoomes@810 48 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 49 ParallelCompactData::RegionData::los_mask = ~dc_mask;
jcoomes@810 50
jcoomes@810 51 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 52 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
jcoomes@810 53
jcoomes@810 54 const ParallelCompactData::RegionData::region_sz_t
jcoomes@810 55 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
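// Editor's note (illustration, not part of the original changeset): the
// region_sz_t value is split at bit 27 (dc_shift).  The low 27 bits
// (los_mask) hold the live object size in words; the high bits (dc_mask) hold
// the destination count and claim state.  dc_one (0x1 << 27) is the increment
// for one destination region, dc_claimed (0x8 << 27) marks a region as
// claimed for filling, and dc_completed (0xc << 27) marks filling as done.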
duke@435 56
duke@435 57 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
duke@435 58 bool PSParallelCompact::_print_phases = false;
duke@435 59
duke@435 60 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
duke@435 61 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
duke@435 62
duke@435 63 double PSParallelCompact::_dwl_mean;
duke@435 64 double PSParallelCompact::_dwl_std_dev;
duke@435 65 double PSParallelCompact::_dwl_first_term;
duke@435 66 double PSParallelCompact::_dwl_adjustment;
duke@435 67 #ifdef ASSERT
duke@435 68 bool PSParallelCompact::_dwl_initialized = false;
duke@435 69 #endif // #ifdef ASSERT
duke@435 70
duke@435 71 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 72 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
duke@435 73 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
duke@435 74 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
duke@435 75 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
duke@435 76 size_t PSParallelCompact::_live_oops_index = 0;
duke@435 77 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
coleenp@548 78 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
coleenp@548 79 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
duke@435 80 bool PSParallelCompact::_pointer_tracking = false;
duke@435 81 bool PSParallelCompact::_root_tracking = true;
duke@435 82
duke@435 83 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
duke@435 84 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
duke@435 85 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
duke@435 86 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
duke@435 87 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
duke@435 88 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
duke@435 89 #endif
duke@435 90
duke@435 91 #ifndef PRODUCT
duke@435 92 const char* PSParallelCompact::space_names[] = {
duke@435 93 "perm", "old ", "eden", "from", "to "
duke@435 94 };
duke@435 95
jcoomes@810 96 void PSParallelCompact::print_region_ranges()
duke@435 97 {
duke@435 98 tty->print_cr("space bottom top end new_top");
duke@435 99 tty->print_cr("------ ---------- ---------- ---------- ----------");
duke@435 100
duke@435 101 for (unsigned int id = 0; id < last_space_id; ++id) {
duke@435 102 const MutableSpace* space = _space_info[id].space();
duke@435 103 tty->print_cr("%u %s "
jcoomes@699 104 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
jcoomes@699 105 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
duke@435 106 id, space_names[id],
jcoomes@810 107 summary_data().addr_to_region_idx(space->bottom()),
jcoomes@810 108 summary_data().addr_to_region_idx(space->top()),
jcoomes@810 109 summary_data().addr_to_region_idx(space->end()),
jcoomes@810 110 summary_data().addr_to_region_idx(_space_info[id].new_top()));
duke@435 111 }
duke@435 112 }
duke@435 113
duke@435 114 void
jcoomes@810 115 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
duke@435 116 {
jcoomes@810 117 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
jcoomes@810 118 #define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
duke@435 119
duke@435 120 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 121 size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
jcoomes@810 122 tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
jcoomes@810 123 REGION_IDX_FORMAT " " PTR_FORMAT " "
jcoomes@810 124 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
jcoomes@810 125 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
duke@435 126 i, c->data_location(), dci, c->destination(),
duke@435 127 c->partial_obj_size(), c->live_obj_size(),
jcoomes@810 128 c->data_size(), c->source_region(), c->destination_count());
jcoomes@810 129
jcoomes@810 130 #undef REGION_IDX_FORMAT
jcoomes@810 131 #undef REGION_DATA_FORMAT
duke@435 132 }
duke@435 133
duke@435 134 void
duke@435 135 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 136 HeapWord* const beg_addr,
duke@435 137 HeapWord* const end_addr)
duke@435 138 {
duke@435 139 size_t total_words = 0;
jcoomes@810 140 size_t i = summary_data.addr_to_region_idx(beg_addr);
jcoomes@810 141 const size_t last = summary_data.addr_to_region_idx(end_addr);
duke@435 142 HeapWord* pdest = 0;
duke@435 143
duke@435 144 while (i <= last) {
jcoomes@810 145 ParallelCompactData::RegionData* c = summary_data.region(i);
duke@435 146 if (c->data_size() != 0 || c->destination() != pdest) {
jcoomes@810 147 print_generic_summary_region(i, c);
duke@435 148 total_words += c->data_size();
duke@435 149 pdest = c->destination();
duke@435 150 }
duke@435 151 ++i;
duke@435 152 }
duke@435 153
duke@435 154 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
duke@435 155 }
duke@435 156
duke@435 157 void
duke@435 158 print_generic_summary_data(ParallelCompactData& summary_data,
duke@435 159 SpaceInfo* space_info)
duke@435 160 {
duke@435 161 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
duke@435 162 const MutableSpace* space = space_info[id].space();
duke@435 163 print_generic_summary_data(summary_data, space->bottom(),
duke@435 164 MAX2(space->top(), space_info[id].new_top()));
duke@435 165 }
duke@435 166 }
duke@435 167
duke@435 168 void
jcoomes@810 169 print_initial_summary_region(size_t i,
jcoomes@810 170 const ParallelCompactData::RegionData* c,
jcoomes@810 171 bool newline = true)
duke@435 172 {
jcoomes@699 173 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
jcoomes@699 174 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
jcoomes@699 175 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
duke@435 176 i, c->destination(),
duke@435 177 c->partial_obj_size(), c->live_obj_size(),
jcoomes@810 178 c->data_size(), c->source_region(), c->destination_count());
duke@435 179 if (newline) tty->cr();
duke@435 180 }
duke@435 181
duke@435 182 void
duke@435 183 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 184 const MutableSpace* space) {
duke@435 185 if (space->top() == space->bottom()) {
duke@435 186 return;
duke@435 187 }
duke@435 188
jcoomes@810 189 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@810 190 typedef ParallelCompactData::RegionData RegionData;
jcoomes@810 191 HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
jcoomes@810 192 const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
jcoomes@810 193 const RegionData* c = summary_data.region(end_region - 1);
duke@435 194 HeapWord* end_addr = c->destination() + c->data_size();
duke@435 195 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
duke@435 196
jcoomes@810 197 // Print (and count) the full regions at the beginning of the space.
jcoomes@810 198 size_t full_region_count = 0;
jcoomes@810 199 size_t i = summary_data.addr_to_region_idx(space->bottom());
jcoomes@810 200 while (i < end_region && summary_data.region(i)->data_size() == region_size) {
jcoomes@810 201 print_initial_summary_region(i, summary_data.region(i));
jcoomes@810 202 ++full_region_count;
duke@435 203 ++i;
duke@435 204 }
duke@435 205
jcoomes@810 206 size_t live_to_right = live_in_space - full_region_count * region_size;
duke@435 207
duke@435 208 double max_reclaimed_ratio = 0.0;
jcoomes@810 209 size_t max_reclaimed_ratio_region = 0;
duke@435 210 size_t max_dead_to_right = 0;
duke@435 211 size_t max_live_to_right = 0;
duke@435 212
jcoomes@810 213 // Print the 'reclaimed ratio' for regions while there is something live in
jcoomes@810 214 // the region or to the right of it. The remaining regions are empty (and
duke@435 215 // uninteresting), and computing the ratio will result in division by 0.
jcoomes@810 216 while (i < end_region && live_to_right > 0) {
jcoomes@810 217 c = summary_data.region(i);
jcoomes@810 218 HeapWord* const region_addr = summary_data.region_to_addr(i);
jcoomes@810 219 const size_t used_to_right = pointer_delta(space->top(), region_addr);
duke@435 220 const size_t dead_to_right = used_to_right - live_to_right;
duke@435 221 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
duke@435 222
duke@435 223 if (reclaimed_ratio > max_reclaimed_ratio) {
duke@435 224 max_reclaimed_ratio = reclaimed_ratio;
jcoomes@810 225 max_reclaimed_ratio_region = i;
duke@435 226 max_dead_to_right = dead_to_right;
duke@435 227 max_live_to_right = live_to_right;
duke@435 228 }
duke@435 229
jcoomes@810 230 print_initial_summary_region(i, c, false);
jcoomes@699 231 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
duke@435 232 reclaimed_ratio, dead_to_right, live_to_right);
duke@435 233
duke@435 234 live_to_right -= c->data_size();
duke@435 235 ++i;
duke@435 236 }
duke@435 237
jcoomes@810 238 // Any remaining regions are empty. Print one more if there is one.
jcoomes@810 239 if (i < end_region) {
jcoomes@810 240 print_initial_summary_region(i, summary_data.region(i));
duke@435 241 }
duke@435 242
jcoomes@699 243 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
jcoomes@699 244 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
jcoomes@810 245 max_reclaimed_ratio_region, max_dead_to_right,
duke@435 246 max_live_to_right, max_reclaimed_ratio);
duke@435 247 }
duke@435 248
duke@435 249 void
duke@435 250 print_initial_summary_data(ParallelCompactData& summary_data,
duke@435 251 SpaceInfo* space_info) {
duke@435 252 unsigned int id = PSParallelCompact::perm_space_id;
duke@435 253 const MutableSpace* space;
duke@435 254 do {
duke@435 255 space = space_info[id].space();
duke@435 256 print_initial_summary_data(summary_data, space);
duke@435 257 } while (++id < PSParallelCompact::eden_space_id);
duke@435 258
duke@435 259 do {
duke@435 260 space = space_info[id].space();
duke@435 261 print_generic_summary_data(summary_data, space->bottom(), space->top());
duke@435 262 } while (++id < PSParallelCompact::last_space_id);
duke@435 263 }
duke@435 264 #endif // #ifndef PRODUCT
duke@435 265
duke@435 266 #ifdef ASSERT
duke@435 267 size_t add_obj_count;
duke@435 268 size_t add_obj_size;
duke@435 269 size_t mark_bitmap_count;
duke@435 270 size_t mark_bitmap_size;
duke@435 271 #endif // #ifdef ASSERT
duke@435 272
duke@435 273 ParallelCompactData::ParallelCompactData()
duke@435 274 {
duke@435 275 _region_start = 0;
duke@435 276
jcoomes@810 277 _region_vspace = 0;
jcoomes@810 278 _region_data = 0;
jcoomes@810 279 _region_count = 0;
duke@435 280 }
duke@435 281
duke@435 282 bool ParallelCompactData::initialize(MemRegion covered_region)
duke@435 283 {
duke@435 284 _region_start = covered_region.start();
duke@435 285 const size_t region_size = covered_region.word_size();
duke@435 286 DEBUG_ONLY(_region_end = _region_start + region_size;)
duke@435 287
jcoomes@810 288 assert(region_align_down(_region_start) == _region_start,
duke@435 289 "region start not aligned");
jcoomes@810 290 assert((region_size & RegionSizeOffsetMask) == 0,
jcoomes@810 291 "region size not a multiple of RegionSize");
jcoomes@810 292
jcoomes@810 293 bool result = initialize_region_data(region_size);
duke@435 294
duke@435 295 return result;
duke@435 296 }
duke@435 297
duke@435 298 PSVirtualSpace*
duke@435 299 ParallelCompactData::create_vspace(size_t count, size_t element_size)
duke@435 300 {
duke@435 301 const size_t raw_bytes = count * element_size;
duke@435 302 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
duke@435 303 const size_t granularity = os::vm_allocation_granularity();
duke@435 304 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
duke@435 305
duke@435 306 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
duke@435 307 MAX2(page_sz, granularity);
jcoomes@514 308 ReservedSpace rs(bytes, rs_align, rs_align > 0);
duke@435 309 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
duke@435 310 rs.size());
duke@435 311 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
duke@435 312 if (vspace != 0) {
duke@435 313 if (vspace->expand_by(bytes)) {
duke@435 314 return vspace;
duke@435 315 }
duke@435 316 delete vspace;
coleenp@672 317 // Release memory reserved in the space.
coleenp@672 318 rs.release();
duke@435 319 }
duke@435 320
duke@435 321 return 0;
duke@435 322 }
duke@435 323
jcoomes@810 324 bool ParallelCompactData::initialize_region_data(size_t region_size)
duke@435 325 {
jcoomes@810 326 const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
jcoomes@810 327 _region_vspace = create_vspace(count, sizeof(RegionData));
jcoomes@810 328 if (_region_vspace != 0) {
jcoomes@810 329 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
jcoomes@810 330 _region_count = count;
duke@435 331 return true;
duke@435 332 }
duke@435 333 return false;
duke@435 334 }
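// Editor's note (worked example, not part of the original changeset): count
// is the number of 512-word regions needed to cover region_size words,
// rounded up.  For example, a 1 GB heap with 8-byte HeapWords spans 2^27
// words and therefore needs 2^18 == 262144 RegionData entries.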
duke@435 335
duke@435 336 void ParallelCompactData::clear()
duke@435 337 {
jcoomes@810 338 memset(_region_data, 0, _region_vspace->committed_size());
duke@435 339 }
duke@435 340
jcoomes@810 341 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
jcoomes@810 342 assert(beg_region <= _region_count, "beg_region out of range");
jcoomes@810 343 assert(end_region <= _region_count, "end_region out of range");
jcoomes@810 344
jcoomes@810 345 const size_t region_cnt = end_region - beg_region;
jcoomes@810 346 memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
duke@435 347 }
duke@435 348
jcoomes@810 349 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
duke@435 350 {
jcoomes@810 351 const RegionData* cur_cp = region(region_idx);
jcoomes@810 352 const RegionData* const end_cp = region(region_count() - 1);
jcoomes@810 353
jcoomes@810 354 HeapWord* result = region_to_addr(region_idx);
duke@435 355 if (cur_cp < end_cp) {
duke@435 356 do {
duke@435 357 result += cur_cp->partial_obj_size();
jcoomes@810 358 } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
duke@435 359 }
duke@435 360 return result;
duke@435 361 }
duke@435 362
duke@435 363 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
duke@435 364 {
duke@435 365 const size_t obj_ofs = pointer_delta(addr, _region_start);
jcoomes@810 366 const size_t beg_region = obj_ofs >> Log2RegionSize;
jcoomes@810 367 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
duke@435 368
duke@435 369 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
duke@435 370 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
duke@435 371
jcoomes@810 372 if (beg_region == end_region) {
jcoomes@810 373 // All in one region.
jcoomes@810 374 _region_data[beg_region].add_live_obj(len);
duke@435 375 return;
duke@435 376 }
duke@435 377
jcoomes@810 378 // First region.
jcoomes@810 379 const size_t beg_ofs = region_offset(addr);
jcoomes@810 380 _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
duke@435 381
duke@435 382 klassOop klass = ((oop)addr)->klass();
jcoomes@810 383 // Middle regions--completely spanned by this object.
jcoomes@810 384 for (size_t region = beg_region + 1; region < end_region; ++region) {
jcoomes@810 385 _region_data[region].set_partial_obj_size(RegionSize);
jcoomes@810 386 _region_data[region].set_partial_obj_addr(addr);
duke@435 387 }
duke@435 388
jcoomes@810 389 // Last region.
jcoomes@810 390 const size_t end_ofs = region_offset(addr + len - 1);
jcoomes@810 391 _region_data[end_region].set_partial_obj_size(end_ofs + 1);
jcoomes@810 392 _region_data[end_region].set_partial_obj_addr(addr);
duke@435 393 }
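// Editor's note (worked example, not part of the original changeset): with
// 512-word regions, add_obj() for a 1200-word object starting 100 words into
// region 7 records
//   region 7: add_live_obj(512 - 100) == 412 live words
//   region 8: set_partial_obj_size(512), set_partial_obj_addr(addr)
//   region 9: set_partial_obj_size(276), set_partial_obj_addr(addr)
// (412 + 512 + 276 == 1200), so fully spanned middle regions appear
// completely full and the last region records how far the object extends
// into it.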
duke@435 394
duke@435 395 void
duke@435 396 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
duke@435 397 {
jcoomes@810 398 assert(region_offset(beg) == 0, "not RegionSize aligned");
jcoomes@810 399 assert(region_offset(end) == 0, "not RegionSize aligned");
jcoomes@810 400
jcoomes@810 401 size_t cur_region = addr_to_region_idx(beg);
jcoomes@810 402 const size_t end_region = addr_to_region_idx(end);
duke@435 403 HeapWord* addr = beg;
jcoomes@810 404 while (cur_region < end_region) {
jcoomes@810 405 _region_data[cur_region].set_destination(addr);
jcoomes@810 406 _region_data[cur_region].set_destination_count(0);
jcoomes@810 407 _region_data[cur_region].set_source_region(cur_region);
jcoomes@810 408 _region_data[cur_region].set_data_location(addr);
jcoomes@810 409
jcoomes@810 410 // Update live_obj_size so the region appears completely full.
jcoomes@810 411 size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
jcoomes@810 412 _region_data[cur_region].set_live_obj_size(live_size);
jcoomes@810 413
jcoomes@810 414 ++cur_region;
jcoomes@810 415 addr += RegionSize;
duke@435 416 }
duke@435 417 }
duke@435 418
duke@435 419 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
duke@435 420 HeapWord* source_beg, HeapWord* source_end,
duke@435 421 HeapWord** target_next,
duke@435 422 HeapWord** source_next) {
duke@435 423 // This is too strict.
jcoomes@810 424 // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
duke@435 425
duke@435 426 if (TraceParallelOldGCSummaryPhase) {
duke@435 427 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
duke@435 428 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
duke@435 429 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
duke@435 430 target_beg, target_end,
duke@435 431 source_beg, source_end,
duke@435 432 target_next != 0 ? *target_next : (HeapWord*) 0,
duke@435 433 source_next != 0 ? *source_next : (HeapWord*) 0);
duke@435 434 }
duke@435 435
jcoomes@810 436 size_t cur_region = addr_to_region_idx(source_beg);
jcoomes@810 437 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
duke@435 438
duke@435 439 HeapWord *dest_addr = target_beg;
jcoomes@810 440 while (cur_region < end_region) {
jcoomes@810 441 size_t words = _region_data[cur_region].data_size();
duke@435 442
duke@435 443 #if 1
duke@435 444 assert(pointer_delta(target_end, dest_addr) >= words,
duke@435 445 "source region does not fit into target region");
duke@435 446 #else
jcoomes@810 447 // XXX - need some work on the corner cases here. If the region does not
jcoomes@810 448 // fit, then must either make sure any partial_obj from the region fits, or
jcoomes@810 449 // "undo" the initial part of the partial_obj that is in the previous
jcoomes@810 450 // region.
duke@435 451 if (dest_addr + words >= target_end) {
duke@435 452 // Let the caller know where to continue.
duke@435 453 *target_next = dest_addr;
jcoomes@810 454 *source_next = region_to_addr(cur_region);
duke@435 455 return false;
duke@435 456 }
duke@435 457 #endif // #if 1
duke@435 458
jcoomes@810 459 _region_data[cur_region].set_destination(dest_addr);
jcoomes@810 460
jcoomes@810 461 // Set the destination_count for cur_region, and if necessary, update
jcoomes@810 462 // source_region for a destination region. The source_region field is
jcoomes@810 463 // updated if cur_region is the first (left-most) region to be copied to a
jcoomes@810 464 // destination region.
duke@435 465 //
jcoomes@810 466 // The destination_count calculation is a bit subtle. A region that has
jcoomes@810 467 // data that compacts into itself does not count itself as a destination.
jcoomes@810 468 // This maintains the invariant that a zero count means the region is
jcoomes@810 469 // available and can be claimed and then filled.
duke@435 470 if (words > 0) {
duke@435 471 HeapWord* const last_addr = dest_addr + words - 1;
jcoomes@810 472 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
jcoomes@810 473 const size_t dest_region_2 = addr_to_region_idx(last_addr);
duke@435 474 #if 0
jcoomes@810 475 // Initially assume that the destination regions will be the same and
duke@435 476 // adjust the value below if necessary. Under this assumption, if
jcoomes@810 477 // cur_region == dest_region_2, then cur_region will be compacted
jcoomes@810 478 // completely into itself.
jcoomes@810 479 uint destination_count = cur_region == dest_region_2 ? 0 : 1;
jcoomes@810 480 if (dest_region_1 != dest_region_2) {
jcoomes@810 481 // Destination regions differ; adjust destination_count.
duke@435 482 destination_count += 1;
jcoomes@810 483 // Data from cur_region will be copied to the start of dest_region_2.
jcoomes@810 484 _region_data[dest_region_2].set_source_region(cur_region);
jcoomes@810 485 } else if (region_offset(dest_addr) == 0) {
jcoomes@810 486 // Data from cur_region will be copied to the start of the destination
jcoomes@810 487 // region.
jcoomes@810 488 _region_data[dest_region_1].set_source_region(cur_region);
duke@435 489 }
duke@435 490 #else
jcoomes@810 491 // Initially assume that the destination regions will be different and
duke@435 492 // adjust the value below if necessary. Under this assumption, if
jcoomes@810 493 // cur_region == dest_region2, then cur_region will be compacted partially
jcoomes@810 494 // into dest_region_1 and partially into itself.
jcoomes@810 495 uint destination_count = cur_region == dest_region_2 ? 1 : 2;
jcoomes@810 496 if (dest_region_1 != dest_region_2) {
jcoomes@810 497 // Data from cur_region will be copied to the start of dest_region_2.
jcoomes@810 498 _region_data[dest_region_2].set_source_region(cur_region);
duke@435 499 } else {
jcoomes@810 500 // Destination regions are the same; adjust destination_count.
duke@435 501 destination_count -= 1;
jcoomes@810 502 if (region_offset(dest_addr) == 0) {
jcoomes@810 503 // Data from cur_region will be copied to the start of the destination
jcoomes@810 504 // region.
jcoomes@810 505 _region_data[dest_region_1].set_source_region(cur_region);
duke@435 506 }
duke@435 507 }
duke@435 508 #endif // #if 0
duke@435 509
jcoomes@810 510 _region_data[cur_region].set_destination_count(destination_count);
jcoomes@810 511 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
duke@435 512 dest_addr += words;
duke@435 513 }
duke@435 514
jcoomes@810 515 ++cur_region;
duke@435 516 }
duke@435 517
duke@435 518 *target_next = dest_addr;
duke@435 519 return true;
duke@435 520 }
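// Editor's note (summary of the destination_count cases above, not part of
// the original changeset): for a source region R with live data,
//   - data compacts entirely into one earlier region:          count == 1
//   - data straddles two earlier regions:                      count == 2
//   - data straddles an earlier region and ends in R itself:   count == 1
//   - data compacts entirely into R itself:                    count == 0
// which preserves the invariant that a zero count means R can be claimed and
// filled immediately.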
duke@435 521
duke@435 522 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
duke@435 523 assert(addr != NULL, "Should detect NULL oop earlier");
duke@435 524 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
duke@435 525 #ifdef ASSERT
duke@435 526 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
duke@435 527 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
duke@435 528 }
duke@435 529 #endif
duke@435 530 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
duke@435 531
jcoomes@810 532 // Region covering the object.
jcoomes@810 533 size_t region_index = addr_to_region_idx(addr);
jcoomes@810 534 const RegionData* const region_ptr = region(region_index);
jcoomes@810 535 HeapWord* const region_addr = region_align_down(addr);
jcoomes@810 536
jcoomes@810 537 assert(addr < region_addr + RegionSize, "Region does not cover object");
jcoomes@810 538 assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
jcoomes@810 539
jcoomes@810 540 HeapWord* result = region_ptr->destination();
jcoomes@810 541
jcoomes@810 542 // If all the data in the region is live, then the new location of the object
jcoomes@810 543 // can be calculated from the destination of the region plus the offset of the
jcoomes@810 544 // object in the region.
jcoomes@810 545 if (region_ptr->data_size() == RegionSize) {
jcoomes@810 546 result += pointer_delta(addr, region_addr);
duke@435 547 return result;
duke@435 548 }
duke@435 549
duke@435 550 // The new location of the object is
jcoomes@810 551 // region destination +
jcoomes@810 552 // size of the partial object extending onto the region +
jcoomes@810 553 // sizes of the live objects in the Region that are to the left of addr
jcoomes@810 554 const size_t partial_obj_size = region_ptr->partial_obj_size();
jcoomes@810 555 HeapWord* const search_start = region_addr + partial_obj_size;
duke@435 556
duke@435 557 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
duke@435 558 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
duke@435 559
duke@435 560 result += partial_obj_size + live_to_left;
duke@435 561 assert(result <= addr, "object cannot move to the right");
duke@435 562 return result;
duke@435 563 }
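// Editor's note (worked example, not part of the original changeset): suppose
// addr lies 300 words into its region, the region's destination() is D, a
// partial object occupies the first 40 words of the region, and the mark
// bitmap reports 100 live words between the end of that partial object and
// addr.  Then calc_new_pointer(addr) returns D + 40 + 100.  If instead the
// region is completely live (data_size() == RegionSize), the object keeps its
// offset within the region and the result is simply D + 300.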
duke@435 564
duke@435 565 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
duke@435 566 klassOop updated_klass;
duke@435 567 if (PSParallelCompact::should_update_klass(old_klass)) {
duke@435 568 updated_klass = (klassOop) calc_new_pointer(old_klass);
duke@435 569 } else {
duke@435 570 updated_klass = old_klass;
duke@435 571 }
duke@435 572
duke@435 573 return updated_klass;
duke@435 574 }
duke@435 575
duke@435 576 #ifdef ASSERT
duke@435 577 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
duke@435 578 {
duke@435 579 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
duke@435 580 const size_t* const end = (const size_t*)vspace->committed_high_addr();
duke@435 581 for (const size_t* p = beg; p < end; ++p) {
duke@435 582 assert(*p == 0, "not zero");
duke@435 583 }
duke@435 584 }
duke@435 585
duke@435 586 void ParallelCompactData::verify_clear()
duke@435 587 {
jcoomes@810 588 verify_clear(_region_vspace);
duke@435 589 }
duke@435 590 #endif // #ifdef ASSERT
duke@435 591
duke@435 592 #ifdef NOT_PRODUCT
jcoomes@810 593 ParallelCompactData::RegionData* debug_region(size_t region_index) {
duke@435 594 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 595 return sd.region(region_index);
duke@435 596 }
duke@435 597 #endif
duke@435 598
duke@435 599 elapsedTimer PSParallelCompact::_accumulated_time;
duke@435 600 unsigned int PSParallelCompact::_total_invocations = 0;
duke@435 601 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
duke@435 602 jlong PSParallelCompact::_time_of_last_gc = 0;
duke@435 603 CollectorCounters* PSParallelCompact::_counters = NULL;
duke@435 604 ParMarkBitMap PSParallelCompact::_mark_bitmap;
duke@435 605 ParallelCompactData PSParallelCompact::_summary_data;
duke@435 606
duke@435 607 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
coleenp@548 608
coleenp@548 609 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
coleenp@548 610 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
coleenp@548 611
coleenp@548 612 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 613 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
coleenp@548 614
duke@435 615 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
duke@435 616 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
duke@435 617
coleenp@548 618 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
coleenp@548 619 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
coleenp@548 620
coleenp@548 621 void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
coleenp@548 622
coleenp@548 623 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
coleenp@548 624 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
duke@435 625
duke@435 626 void PSParallelCompact::post_initialize() {
duke@435 627 ParallelScavengeHeap* heap = gc_heap();
duke@435 628 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 629
duke@435 630 MemRegion mr = heap->reserved_region();
duke@435 631 _ref_processor = ReferenceProcessor::create_ref_processor(
duke@435 632 mr, // span
duke@435 633 true, // atomic_discovery
duke@435 634 true, // mt_discovery
duke@435 635 &_is_alive_closure,
duke@435 636 ParallelGCThreads,
duke@435 637 ParallelRefProcEnabled);
duke@435 638 _counters = new CollectorCounters("PSParallelCompact", 1);
duke@435 639
duke@435 640 // Initialize static fields in ParCompactionManager.
duke@435 641 ParCompactionManager::initialize(mark_bitmap());
duke@435 642 }
duke@435 643
duke@435 644 bool PSParallelCompact::initialize() {
duke@435 645 ParallelScavengeHeap* heap = gc_heap();
duke@435 646 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 647 MemRegion mr = heap->reserved_region();
duke@435 648
duke@435 649 // Was the old gen allocated successfully?
duke@435 650 if (!heap->old_gen()->is_allocated()) {
duke@435 651 return false;
duke@435 652 }
duke@435 653
duke@435 654 initialize_space_info();
duke@435 655 initialize_dead_wood_limiter();
duke@435 656
duke@435 657 if (!_mark_bitmap.initialize(mr)) {
duke@435 658 vm_shutdown_during_initialization("Unable to allocate bit map for "
duke@435 659 "parallel garbage collection for the requested heap size.");
duke@435 660 return false;
duke@435 661 }
duke@435 662
duke@435 663 if (!_summary_data.initialize(mr)) {
duke@435 664 vm_shutdown_during_initialization("Unable to allocate tables for "
duke@435 665 "parallel garbage collection for the requested heap size.");
duke@435 666 return false;
duke@435 667 }
duke@435 668
duke@435 669 return true;
duke@435 670 }
duke@435 671
duke@435 672 void PSParallelCompact::initialize_space_info()
duke@435 673 {
duke@435 674 memset(&_space_info, 0, sizeof(_space_info));
duke@435 675
duke@435 676 ParallelScavengeHeap* heap = gc_heap();
duke@435 677 PSYoungGen* young_gen = heap->young_gen();
duke@435 678 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 679
duke@435 680 _space_info[perm_space_id].set_space(perm_space);
duke@435 681 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
duke@435 682 _space_info[eden_space_id].set_space(young_gen->eden_space());
duke@435 683 _space_info[from_space_id].set_space(young_gen->from_space());
duke@435 684 _space_info[to_space_id].set_space(young_gen->to_space());
duke@435 685
duke@435 686 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
duke@435 687 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
duke@435 688
duke@435 689 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
duke@435 690 if (TraceParallelOldGCDensePrefix) {
duke@435 691 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
duke@435 692 _space_info[perm_space_id].min_dense_prefix());
duke@435 693 }
duke@435 694 }
duke@435 695
duke@435 696 void PSParallelCompact::initialize_dead_wood_limiter()
duke@435 697 {
duke@435 698 const size_t max = 100;
duke@435 699 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
duke@435 700 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
duke@435 701 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
duke@435 702 DEBUG_ONLY(_dwl_initialized = true;)
duke@435 703 _dwl_adjustment = normal_distribution(1.0);
duke@435 704 }
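// Editor's note (assumption, not stated in this file): _dwl_first_term is the
// leading coefficient 1 / (sigma * sqrt(2 * pi)) of a Gaussian density, so
// normal_distribution(x), defined in the header, presumably evaluates
//   f(x) = _dwl_first_term * exp(-0.5 * ((x - _dwl_mean) / _dwl_std_dev)^2)
// with _dwl_adjustment caching f(1.0) for the offset applied later in
// dead_wood_limiter().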
duke@435 705
duke@435 706 // Simple class for storing info about the heap at the start of GC, to be used
duke@435 707 // after GC for comparison/printing.
duke@435 708 class PreGCValues {
duke@435 709 public:
duke@435 710 PreGCValues() { }
duke@435 711 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
duke@435 712
duke@435 713 void fill(ParallelScavengeHeap* heap) {
duke@435 714 _heap_used = heap->used();
duke@435 715 _young_gen_used = heap->young_gen()->used_in_bytes();
duke@435 716 _old_gen_used = heap->old_gen()->used_in_bytes();
duke@435 717 _perm_gen_used = heap->perm_gen()->used_in_bytes();
duke@435 718 };
duke@435 719
duke@435 720 size_t heap_used() const { return _heap_used; }
duke@435 721 size_t young_gen_used() const { return _young_gen_used; }
duke@435 722 size_t old_gen_used() const { return _old_gen_used; }
duke@435 723 size_t perm_gen_used() const { return _perm_gen_used; }
duke@435 724
duke@435 725 private:
duke@435 726 size_t _heap_used;
duke@435 727 size_t _young_gen_used;
duke@435 728 size_t _old_gen_used;
duke@435 729 size_t _perm_gen_used;
duke@435 730 };
duke@435 731
duke@435 732 void
duke@435 733 PSParallelCompact::clear_data_covering_space(SpaceId id)
duke@435 734 {
duke@435 735 // At this point, top is the value before GC, new_top() is the value that will
duke@435 736 // be set at the end of GC. The marking bitmap is cleared to top; nothing
duke@435 737 // should be marked above top. The summary data is cleared to the larger of
duke@435 738 // top & new_top.
duke@435 739 MutableSpace* const space = _space_info[id].space();
duke@435 740 HeapWord* const bot = space->bottom();
duke@435 741 HeapWord* const top = space->top();
duke@435 742 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
duke@435 743
duke@435 744 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
duke@435 745 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
duke@435 746 _mark_bitmap.clear_range(beg_bit, end_bit);
duke@435 747
jcoomes@810 748 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
jcoomes@810 749 const size_t end_region =
jcoomes@810 750 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
jcoomes@810 751 _summary_data.clear_range(beg_region, end_region);
duke@435 752 }
duke@435 753
duke@435 754 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
duke@435 755 {
duke@435 756 // Update the from & to space pointers in space_info, since they are swapped
duke@435 757 // at each young gen gc. Do the update unconditionally (even though a
duke@435 758 // promotion failure does not swap spaces) because an unknown number of minor
duke@435 759 // collections will have swapped the spaces an unknown number of times.
duke@435 760 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
duke@435 761 ParallelScavengeHeap* heap = gc_heap();
duke@435 762 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
duke@435 763 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
duke@435 764
duke@435 765 pre_gc_values->fill(heap);
duke@435 766
duke@435 767 ParCompactionManager::reset();
duke@435 768 NOT_PRODUCT(_mark_bitmap.reset_counters());
duke@435 769 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
duke@435 770 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
duke@435 771
duke@435 772 // Increment the invocation count
apetrusenko@574 773 heap->increment_total_collections(true);
duke@435 774
duke@435 775 // We need to track unique mark sweep invocations as well.
duke@435 776 _total_invocations++;
duke@435 777
duke@435 778 if (PrintHeapAtGC) {
duke@435 779 Universe::print_heap_before_gc();
duke@435 780 }
duke@435 781
duke@435 782 // Fill in TLABs
duke@435 783 heap->accumulate_statistics_all_tlabs();
duke@435 784 heap->ensure_parsability(true); // retire TLABs
duke@435 785
duke@435 786 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 787 HandleMark hm; // Discard invalid handles created during verification
duke@435 788 gclog_or_tty->print(" VerifyBeforeGC:");
duke@435 789 Universe::verify(true);
duke@435 790 }
duke@435 791
duke@435 792 // Verify object start arrays
duke@435 793 if (VerifyObjectStartArray &&
duke@435 794 VerifyBeforeGC) {
duke@435 795 heap->old_gen()->verify_object_start_array();
duke@435 796 heap->perm_gen()->verify_object_start_array();
duke@435 797 }
duke@435 798
duke@435 799 DEBUG_ONLY(mark_bitmap()->verify_clear();)
duke@435 800 DEBUG_ONLY(summary_data().verify_clear();)
jcoomes@645 801
jcoomes@645 802 // Have worker threads release resources the next time they run a task.
jcoomes@645 803 gc_task_manager()->release_all_resources();
duke@435 804 }
duke@435 805
duke@435 806 void PSParallelCompact::post_compact()
duke@435 807 {
duke@435 808 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
duke@435 809
duke@435 810 // Clear the marking bitmap and summary data and update top() in each space.
duke@435 811 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 812 clear_data_covering_space(SpaceId(id));
duke@435 813 _space_info[id].space()->set_top(_space_info[id].new_top());
duke@435 814 }
duke@435 815
duke@435 816 MutableSpace* const eden_space = _space_info[eden_space_id].space();
duke@435 817 MutableSpace* const from_space = _space_info[from_space_id].space();
duke@435 818 MutableSpace* const to_space = _space_info[to_space_id].space();
duke@435 819
duke@435 820 ParallelScavengeHeap* heap = gc_heap();
duke@435 821 bool eden_empty = eden_space->is_empty();
duke@435 822 if (!eden_empty) {
duke@435 823 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
duke@435 824 heap->young_gen(), heap->old_gen());
duke@435 825 }
duke@435 826
duke@435 827 // Update heap occupancy information which is used as input to the soft ref
duke@435 828 // clearing policy at the next gc.
duke@435 829 Universe::update_heap_info_at_gc();
duke@435 830
duke@435 831 bool young_gen_empty = eden_empty && from_space->is_empty() &&
duke@435 832 to_space->is_empty();
duke@435 833
duke@435 834 BarrierSet* bs = heap->barrier_set();
duke@435 835 if (bs->is_a(BarrierSet::ModRef)) {
duke@435 836 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
duke@435 837 MemRegion old_mr = heap->old_gen()->reserved();
duke@435 838 MemRegion perm_mr = heap->perm_gen()->reserved();
duke@435 839 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
duke@435 840
duke@435 841 if (young_gen_empty) {
duke@435 842 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 843 } else {
duke@435 844 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 845 }
duke@435 846 }
duke@435 847
duke@435 848 Threads::gc_epilogue();
duke@435 849 CodeCache::gc_epilogue();
duke@435 850
duke@435 851 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@435 852
duke@435 853 ref_processor()->enqueue_discovered_references(NULL);
duke@435 854
jmasa@698 855 if (ZapUnusedHeapArea) {
jmasa@698 856 heap->gen_mangle_unused_area();
jmasa@698 857 }
jmasa@698 858
duke@435 859 // Update time of last GC
duke@435 860 reset_millis_since_last_gc();
duke@435 861 }
duke@435 862
duke@435 863 HeapWord*
duke@435 864 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
duke@435 865 bool maximum_compaction)
duke@435 866 {
jcoomes@810 867 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 868 const ParallelCompactData& sd = summary_data();
duke@435 869
duke@435 870 const MutableSpace* const space = _space_info[id].space();
jcoomes@810 871 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
jcoomes@810 872 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
jcoomes@810 873 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 874
jcoomes@810 875 // Skip full regions at the beginning of the space--they are necessarily part
duke@435 876 // of the dense prefix.
duke@435 877 size_t full_count = 0;
jcoomes@810 878 const RegionData* cp;
jcoomes@810 879 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
duke@435 880 ++full_count;
duke@435 881 }
duke@435 882
duke@435 883 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 884 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 885 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
duke@435 886 if (maximum_compaction || cp == end_cp || interval_ended) {
duke@435 887 _maximum_compaction_gc_num = total_invocations();
jcoomes@810 888 return sd.region_to_addr(cp);
duke@435 889 }
duke@435 890
duke@435 891 HeapWord* const new_top = _space_info[id].new_top();
duke@435 892 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 893 const size_t space_used = space->used_in_words();
duke@435 894 const size_t space_capacity = space->capacity_in_words();
duke@435 895
duke@435 896 const double cur_density = double(space_live) / space_capacity;
duke@435 897 const double deadwood_density =
duke@435 898 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
duke@435 899 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
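// Editor's note: the curve (1 - d)^2 * d^2 used above peaks at d == 0.5 with
// value 0.0625 and falls to zero for an empty or completely full space, so
// the dead wood goal is largest at medium densities.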
duke@435 900
duke@435 901 if (TraceParallelOldGCDensePrefix) {
duke@435 902 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
duke@435 903 cur_density, deadwood_density, deadwood_goal);
duke@435 904 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 905 "space_cap=" SIZE_FORMAT,
duke@435 906 space_live, space_used,
duke@435 907 space_capacity);
duke@435 908 }
duke@435 909
duke@435 910 // XXX - Use binary search?
jcoomes@810 911 HeapWord* dense_prefix = sd.region_to_addr(cp);
jcoomes@810 912 const RegionData* full_cp = cp;
jcoomes@810 913 const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
duke@435 914 while (cp < end_cp) {
jcoomes@810 915 HeapWord* region_destination = cp->destination();
jcoomes@810 916 const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
duke@435 917 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 918 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
jcoomes@699 919 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
jcoomes@810 920 sd.region(cp), region_destination,
duke@435 921 dense_prefix, cur_deadwood);
duke@435 922 }
duke@435 923
duke@435 924 if (cur_deadwood >= deadwood_goal) {
jcoomes@810 925 // Found the region that has the correct amount of deadwood to the left.
jcoomes@810 926 // This typically occurs after crossing a fairly sparse set of regions, so
jcoomes@810 927 // iterate backwards over those sparse regions, looking for the region
jcoomes@810 928 // that has the lowest density of live objects 'to the right.'
jcoomes@810 929 size_t space_to_left = sd.region(cp) * region_size;
duke@435 930 size_t live_to_left = space_to_left - cur_deadwood;
duke@435 931 size_t space_to_right = space_capacity - space_to_left;
duke@435 932 size_t live_to_right = space_live - live_to_left;
duke@435 933 double density_to_right = double(live_to_right) / space_to_right;
duke@435 934 while (cp > full_cp) {
duke@435 935 --cp;
jcoomes@810 936 const size_t prev_region_live_to_right = live_to_right -
jcoomes@810 937 cp->data_size();
jcoomes@810 938 const size_t prev_region_space_to_right = space_to_right + region_size;
jcoomes@810 939 double prev_region_density_to_right =
jcoomes@810 940 double(prev_region_live_to_right) / prev_region_space_to_right;
jcoomes@810 941 if (density_to_right <= prev_region_density_to_right) {
duke@435 942 return dense_prefix;
duke@435 943 }
duke@435 944 if (TraceParallelOldGCDensePrefix && Verbose) {
jcoomes@699 945 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
jcoomes@810 946 "pc_d2r=%10.8f", sd.region(cp), density_to_right,
jcoomes@810 947 prev_region_density_to_right);
duke@435 948 }
jcoomes@810 949 dense_prefix -= region_size;
jcoomes@810 950 live_to_right = prev_region_live_to_right;
jcoomes@810 951 space_to_right = prev_region_space_to_right;
jcoomes@810 952 density_to_right = prev_region_density_to_right;
duke@435 953 }
duke@435 954 return dense_prefix;
duke@435 955 }
duke@435 956
jcoomes@810 957 dense_prefix += region_size;
duke@435 958 ++cp;
duke@435 959 }
duke@435 960
duke@435 961 return dense_prefix;
duke@435 962 }
duke@435 963
duke@435 964 #ifndef PRODUCT
duke@435 965 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
duke@435 966 const SpaceId id,
duke@435 967 const bool maximum_compaction,
duke@435 968 HeapWord* const addr)
duke@435 969 {
jcoomes@810 970 const size_t region_idx = summary_data().addr_to_region_idx(addr);
jcoomes@810 971 RegionData* const cp = summary_data().region(region_idx);
duke@435 972 const MutableSpace* const space = _space_info[id].space();
duke@435 973 HeapWord* const new_top = _space_info[id].new_top();
duke@435 974
duke@435 975 const size_t space_live = pointer_delta(new_top, space->bottom());
duke@435 976 const size_t dead_to_left = pointer_delta(addr, cp->destination());
duke@435 977 const size_t space_cap = space->capacity_in_words();
duke@435 978 const double dead_to_left_pct = double(dead_to_left) / space_cap;
duke@435 979 const size_t live_to_right = new_top - cp->destination();
duke@435 980 const size_t dead_to_right = space->top() - addr - live_to_right;
duke@435 981
jcoomes@699 982 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
duke@435 983 "spl=" SIZE_FORMAT " "
duke@435 984 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
duke@435 985 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
duke@435 986 " ratio=%10.8f",
jcoomes@810 987 algorithm, addr, region_idx,
duke@435 988 space_live,
duke@435 989 dead_to_left, dead_to_left_pct,
duke@435 990 dead_to_right, live_to_right,
duke@435 991 double(dead_to_right) / live_to_right);
duke@435 992 }
duke@435 993 #endif // #ifndef PRODUCT
duke@435 994
duke@435 995 // Return a fraction indicating how much of the generation can be treated as
duke@435 996 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
duke@435 997 // based on the density of live objects in the generation to determine a limit,
duke@435 998 // which is then adjusted so the return value is min_percent when the density is
duke@435 999 // 1.
duke@435 1000 //
duke@435 1001 // The following table shows some return values for different values of the
duke@435 1002 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
duke@435 1003 // min_percent is 1.
duke@435 1004 //
duke@435 1005 // fraction allowed as dead wood
duke@435 1006 // -----------------------------------------------------------------
duke@435 1007 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
duke@435 1008 // ------- ---------- ---------- ---------- ---------- ---------- ----------
duke@435 1009 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1010 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1011 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1012 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1013 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1014 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1015 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1016 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1017 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1018 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1019 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
duke@435 1020 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
duke@435 1021 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
duke@435 1022 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
duke@435 1023 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
duke@435 1024 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
duke@435 1025 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
duke@435 1026 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
duke@435 1027 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
duke@435 1028 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
duke@435 1029 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
duke@435 1030
duke@435 1031 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
duke@435 1032 {
duke@435 1033 assert(_dwl_initialized, "uninitialized");
duke@435 1034
duke@435 1035 // The raw limit is the value of the normal distribution at x = density.
duke@435 1036 const double raw_limit = normal_distribution(density);
duke@435 1037
duke@435 1038 // Adjust the raw limit so it becomes the minimum when the density is 1.
duke@435 1039 //
duke@435 1040 // First subtract the adjustment value (which is simply the precomputed value
duke@435 1041 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
duke@435 1042 // Then add the minimum value, so the minimum is returned when the density is
duke@435 1043 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
duke@435 1044 const double min = double(min_percent) / 100.0;
duke@435 1045 const double limit = raw_limit - _dwl_adjustment + min;
duke@435 1046 return MAX2(limit, 0.0);
duke@435 1047 }
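// Editor's note (worked example, not part of the original changeset): with
// the table's parameters (mean 0.5, min_percent 1) and std_dev == 0.80, a
// density of 0.5 gives raw_limit ~= 0.4987, _dwl_adjustment ==
// normal_distribution(1.0) ~= 0.4102, and thus a limit of
// 0.4987 - 0.4102 + 0.01 ~= 0.0985, matching the 0.09847664 entry in the
// std_dev=80 column above.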
duke@435 1048
jcoomes@810 1049 ParallelCompactData::RegionData*
jcoomes@810 1050 PSParallelCompact::first_dead_space_region(const RegionData* beg,
jcoomes@810 1051 const RegionData* end)
duke@435 1052 {
jcoomes@810 1053 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 1054 ParallelCompactData& sd = summary_data();
jcoomes@810 1055 size_t left = sd.region(beg);
jcoomes@810 1056 size_t right = end > beg ? sd.region(end) - 1 : left;
duke@435 1057
duke@435 1058 // Binary search.
duke@435 1059 while (left < right) {
duke@435 1060 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1061 const size_t middle = left + (right - left) / 2;
jcoomes@810 1062 RegionData* const middle_ptr = sd.region(middle);
duke@435 1063 HeapWord* const dest = middle_ptr->destination();
jcoomes@810 1064 HeapWord* const addr = sd.region_to_addr(middle);
duke@435 1065 assert(dest != NULL, "sanity");
duke@435 1066 assert(dest <= addr, "must move left");
duke@435 1067
duke@435 1068 if (middle > left && dest < addr) {
duke@435 1069 right = middle - 1;
jcoomes@810 1070 } else if (middle < right && middle_ptr->data_size() == region_size) {
duke@435 1071 left = middle + 1;
duke@435 1072 } else {
duke@435 1073 return middle_ptr;
duke@435 1074 }
duke@435 1075 }
jcoomes@810 1076 return sd.region(left);
duke@435 1077 }
duke@435 1078
jcoomes@810 1079 ParallelCompactData::RegionData*
jcoomes@810 1080 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
jcoomes@810 1081 const RegionData* end,
jcoomes@810 1082 size_t dead_words)
duke@435 1083 {
duke@435 1084 ParallelCompactData& sd = summary_data();
jcoomes@810 1085 size_t left = sd.region(beg);
jcoomes@810 1086 size_t right = end > beg ? sd.region(end) - 1 : left;
duke@435 1087
duke@435 1088 // Binary search.
duke@435 1089 while (left < right) {
duke@435 1090 // Equivalent to (left + right) / 2, but does not overflow.
duke@435 1091 const size_t middle = left + (right - left) / 2;
jcoomes@810 1092 RegionData* const middle_ptr = sd.region(middle);
duke@435 1093 HeapWord* const dest = middle_ptr->destination();
jcoomes@810 1094 HeapWord* const addr = sd.region_to_addr(middle);
duke@435 1095 assert(dest != NULL, "sanity");
duke@435 1096 assert(dest <= addr, "must move left");
duke@435 1097
duke@435 1098 const size_t dead_to_left = pointer_delta(addr, dest);
duke@435 1099 if (middle > left && dead_to_left > dead_words) {
duke@435 1100 right = middle - 1;
duke@435 1101 } else if (middle < right && dead_to_left < dead_words) {
duke@435 1102 left = middle + 1;
duke@435 1103 } else {
duke@435 1104 return middle_ptr;
duke@435 1105 }
duke@435 1106 }
jcoomes@810 1107 return sd.region(left);
duke@435 1108 }
duke@435 1109
duke@435 1110 // The result is valid during the summary phase, after the initial summarization
duke@435 1111 // of each space into itself, and before final summarization.
duke@435 1112 inline double
jcoomes@810 1113 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
duke@435 1114 HeapWord* const bottom,
duke@435 1115 HeapWord* const top,
duke@435 1116 HeapWord* const new_top)
duke@435 1117 {
duke@435 1118 ParallelCompactData& sd = summary_data();
duke@435 1119
duke@435 1120 assert(cp != NULL, "sanity");
duke@435 1121 assert(bottom != NULL, "sanity");
duke@435 1122 assert(top != NULL, "sanity");
duke@435 1123 assert(new_top != NULL, "sanity");
duke@435 1124 assert(top >= new_top, "summary data problem?");
duke@435 1125 assert(new_top > bottom, "space is empty; should not be here");
duke@435 1126 assert(new_top >= cp->destination(), "sanity");
jcoomes@810 1127 assert(top >= sd.region_to_addr(cp), "sanity");
duke@435 1128
duke@435 1129 HeapWord* const destination = cp->destination();
duke@435 1130 const size_t dense_prefix_live = pointer_delta(destination, bottom);
duke@435 1131 const size_t compacted_region_live = pointer_delta(new_top, destination);
jcoomes@810 1132 const size_t compacted_region_used = pointer_delta(top,
jcoomes@810 1133 sd.region_to_addr(cp));
duke@435 1134 const size_t reclaimable = compacted_region_used - compacted_region_live;
duke@435 1135
duke@435 1136 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
duke@435 1137 return double(reclaimable) / divisor;
duke@435 1138 }
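// Editor's note (worked example, not part of the original changeset): if the
// candidate region's destination leaves 10000 live words in the dense prefix,
// 2000 live words compact at or beyond that destination, and 5000 words are
// used from the region's start to top, then reclaimable == 5000 - 2000 ==
// 3000 and the ratio is 3000 / (10000 + 1.25 * 2000) == 0.24.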
duke@435 1139
duke@435 1140 // Return the address of the end of the dense prefix, a.k.a. the start of the
jcoomes@810 1141 // compacted region. The address is always on a region boundary.
duke@435 1142 //
jcoomes@810 1143 // Completely full regions at the left are skipped, since no compaction can
jcoomes@810 1144 // occur in those regions. Then the maximum amount of dead wood to allow is
jcoomes@810 1145 // computed, based on the density (amount live / capacity) of the generation;
jcoomes@810 1146 // the region with approximately that amount of dead space to the left is
jcoomes@810 1147 // identified as the limit region. Regions between the last completely full
jcoomes@810 1148 // region and the limit region are scanned and the one that has the best
jcoomes@810 1149 // (maximum) reclaimed_ratio() is selected.
duke@435 1150 HeapWord*
duke@435 1151 PSParallelCompact::compute_dense_prefix(const SpaceId id,
duke@435 1152 bool maximum_compaction)
duke@435 1153 {
jcoomes@810 1154 const size_t region_size = ParallelCompactData::RegionSize;
duke@435 1155 const ParallelCompactData& sd = summary_data();
duke@435 1156
duke@435 1157 const MutableSpace* const space = _space_info[id].space();
duke@435 1158 HeapWord* const top = space->top();
jcoomes@810 1159 HeapWord* const top_aligned_up = sd.region_align_up(top);
duke@435 1160 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1161 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
duke@435 1162 HeapWord* const bottom = space->bottom();
jcoomes@810 1163 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
jcoomes@810 1164 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 1165 const RegionData* const new_top_cp =
jcoomes@810 1166 sd.addr_to_region_ptr(new_top_aligned_up);
jcoomes@810 1167
jcoomes@810 1168 // Skip full regions at the beginning of the space--they are necessarily part
duke@435 1169 // of the dense prefix.
jcoomes@810 1170 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
jcoomes@810 1171 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
duke@435 1172 space->is_empty(), "no dead space allowed to the left");
jcoomes@810 1173 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
jcoomes@810 1174 "region must have dead space");
duke@435 1175
duke@435 1176 // The gc number is saved whenever a maximum compaction is done, and used to
duke@435 1177 // determine when the maximum compaction interval has expired. This avoids
duke@435 1178 // successive max compactions for different reasons.
duke@435 1179 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
duke@435 1180 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
duke@435 1181 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
duke@435 1182 total_invocations() == HeapFirstMaximumCompactionCount;
duke@435 1183 if (maximum_compaction || full_cp == top_cp || interval_ended) {
duke@435 1184 _maximum_compaction_gc_num = total_invocations();
jcoomes@810 1185 return sd.region_to_addr(full_cp);
duke@435 1186 }
duke@435 1187
duke@435 1188 const size_t space_live = pointer_delta(new_top, bottom);
duke@435 1189 const size_t space_used = space->used_in_words();
duke@435 1190 const size_t space_capacity = space->capacity_in_words();
duke@435 1191
duke@435 1192 const double density = double(space_live) / double(space_capacity);
duke@435 1193 const size_t min_percent_free =
duke@435 1194 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
duke@435 1195 const double limiter = dead_wood_limiter(density, min_percent_free);
duke@435 1196 const size_t dead_wood_max = space_used - space_live;
duke@435 1197 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
duke@435 1198 dead_wood_max);
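  // Illustrative numbers only: a space with 1000k words of capacity, 600k used
  // and 500k live has density 0.5 and dead_wood_max = 100k words; if
  // dead_wood_limiter() returns 0.02 for that density, the limit is
  // min(1000k * 0.02, 100k) = 20k words.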
duke@435 1199
duke@435 1200 if (TraceParallelOldGCDensePrefix) {
duke@435 1201 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
duke@435 1202 "space_cap=" SIZE_FORMAT,
duke@435 1203 space_live, space_used,
duke@435 1204 space_capacity);
duke@435 1205 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
duke@435 1206 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
duke@435 1207 density, min_percent_free, limiter,
duke@435 1208 dead_wood_max, dead_wood_limit);
duke@435 1209 }
duke@435 1210
jcoomes@810 1211 // Locate the region with the desired amount of dead space to the left.
jcoomes@810 1212 const RegionData* const limit_cp =
jcoomes@810 1213 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
jcoomes@810 1214
jcoomes@810 1215 // Scan from the first region with dead space to the limit region and find the
duke@435 1216 // one with the best (largest) reclaimed ratio.
duke@435 1217 double best_ratio = 0.0;
jcoomes@810 1218 const RegionData* best_cp = full_cp;
jcoomes@810 1219 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
duke@435 1220 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
duke@435 1221 if (tmp_ratio > best_ratio) {
duke@435 1222 best_cp = cp;
duke@435 1223 best_ratio = tmp_ratio;
duke@435 1224 }
duke@435 1225 }
duke@435 1226
duke@435 1227 #if 0
jcoomes@810 1228 // Something to consider: if the region with the best ratio is 'close to' the
jcoomes@810 1229 // first region w/free space, choose the first region with free space
jcoomes@810 1230 // ("first-free"). The first-free region is usually near the start of the
duke@435 1231 // heap, which means we are copying most of the heap already, so copy a bit
duke@435 1232 // more to get complete compaction.
jcoomes@810 1233 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
duke@435 1234 _maximum_compaction_gc_num = total_invocations();
duke@435 1235 best_cp = full_cp;
duke@435 1236 }
duke@435 1237 #endif // #if 0
duke@435 1238
jcoomes@810 1239 return sd.region_to_addr(best_cp);
duke@435 1240 }
duke@435 1241
duke@435 1242 void PSParallelCompact::summarize_spaces_quick()
duke@435 1243 {
duke@435 1244 for (unsigned int i = 0; i < last_space_id; ++i) {
duke@435 1245 const MutableSpace* space = _space_info[i].space();
duke@435 1246 bool result = _summary_data.summarize(space->bottom(), space->end(),
duke@435 1247 space->bottom(), space->top(),
duke@435 1248 _space_info[i].new_top_addr());
duke@435 1249 assert(result, "should never fail");
duke@435 1250 _space_info[i].set_dense_prefix(space->bottom());
duke@435 1251 }
duke@435 1252 }
duke@435 1253
duke@435 1254 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
duke@435 1255 {
duke@435 1256 HeapWord* const dense_prefix_end = dense_prefix(id);
jcoomes@810 1257 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
duke@435 1258 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
jcoomes@810 1259 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
duke@435 1260 // Only enough dead space is filled so that any remaining dead space to the
duke@435 1261 // left is larger than the minimum filler object. (The remainder is filled
duke@435 1262 // during the copy/update phase.)
duke@435 1263 //
duke@435 1264 // The size of the dead space to the right of the boundary is not a
duke@435 1265 // concern, since compaction will be able to use whatever space is
duke@435 1266 // available.
duke@435 1267 //
duke@435 1268 // Here '||' is the boundary, 'x' represents a don't care bit and a box
duke@435 1269 // surrounds the space to be filled with an object.
duke@435 1270 //
duke@435 1271 // In the 32-bit VM, each bit represents two 32-bit words:
duke@435 1272 // +---+
duke@435 1273 // a) beg_bits: ... x x x | 0 | || 0 x x ...
duke@435 1274 // end_bits: ... x x x | 0 | || 0 x x ...
duke@435 1275 // +---+
duke@435 1276 //
duke@435 1277 // In the 64-bit VM, each bit represents one 64-bit word:
duke@435 1278 // +------------+
duke@435 1279 // b) beg_bits: ... x x x | 0 || 0 | x x ...
duke@435 1280 // end_bits: ... x x 1 | 0 || 0 | x x ...
duke@435 1281 // +------------+
duke@435 1282 // +-------+
duke@435 1283 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
duke@435 1284 // end_bits: ... x 1 | 0 0 | || 0 x x ...
duke@435 1285 // +-------+
duke@435 1286 // +-----------+
duke@435 1287 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
duke@435 1288 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
duke@435 1289 // +-----------+
duke@435 1290 // +-------+
duke@435 1291 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1292 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
duke@435 1293 // +-------+
duke@435 1294
duke@435 1295 // Initially assume case a, c or e will apply.
duke@435 1296 size_t obj_len = (size_t)oopDesc::header_size();
duke@435 1297 HeapWord* obj_beg = dense_prefix_end - obj_len;
duke@435 1298
duke@435 1299 #ifdef _LP64
duke@435 1300 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
duke@435 1301 // Case b above.
duke@435 1302 obj_beg = dense_prefix_end - 1;
duke@435 1303 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
duke@435 1304 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
duke@435 1305 // Case d above.
duke@435 1306 obj_beg = dense_prefix_end - 3;
duke@435 1307 obj_len = 3;
duke@435 1308 }
duke@435 1309 #endif // #ifdef _LP64
duke@435 1310
jcoomes@916 1311 gc_heap()->fill_with_object(obj_beg, obj_len);
duke@435 1312 _mark_bitmap.mark_obj(obj_beg, obj_len);
duke@435 1313 _summary_data.add_obj(obj_beg, obj_len);
duke@435 1314 assert(start_array(id) != NULL, "sanity");
duke@435 1315 start_array(id)->allocate_block(obj_beg);
duke@435 1316 }
duke@435 1317 }
duke@435 1318
duke@435 1319 void
duke@435 1320 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
duke@435 1321 {
duke@435 1322 assert(id < last_space_id, "id out of range");
jcoomes@700 1323 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
jcoomes@700 1324 "should have been set in summarize_spaces_quick()");
duke@435 1325
duke@435 1326 const MutableSpace* space = _space_info[id].space();
jcoomes@700 1327 if (_space_info[id].new_top() != space->bottom()) {
jcoomes@700 1328 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
jcoomes@700 1329 _space_info[id].set_dense_prefix(dense_prefix_end);
duke@435 1330
duke@435 1331 #ifndef PRODUCT
jcoomes@700 1332 if (TraceParallelOldGCDensePrefix) {
jcoomes@700 1333 print_dense_prefix_stats("ratio", id, maximum_compaction,
jcoomes@700 1334 dense_prefix_end);
jcoomes@700 1335 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
jcoomes@700 1336 print_dense_prefix_stats("density", id, maximum_compaction, addr);
jcoomes@700 1337 }
jcoomes@700 1338 #endif // #ifndef PRODUCT
jcoomes@700 1339
jcoomes@700 1340 // If dead space crosses the dense prefix boundary, it is (at least
jcoomes@700 1341 // partially) filled with a dummy object, marked live and added to the
jcoomes@700 1342 // summary data. This simplifies the copy/update phase and must be done
jcoomes@700 1343 // before the final locations of objects are determined, to prevent leaving
jcoomes@700 1344 // a fragment of dead space that is too small to fill with an object.
jcoomes@700 1345 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
jcoomes@700 1346 fill_dense_prefix_end(id);
jcoomes@700 1347 }
jcoomes@700 1348
jcoomes@810 1349 // Compute the destination of each Region, and thus each object.
jcoomes@700 1350 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
jcoomes@700 1351 _summary_data.summarize(dense_prefix_end, space->end(),
jcoomes@700 1352 dense_prefix_end, space->top(),
jcoomes@700 1353 _space_info[id].new_top_addr());
duke@435 1354 }
duke@435 1355
duke@435 1356 if (TraceParallelOldGCSummaryPhase) {
jcoomes@810 1357 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@700 1358 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
jcoomes@810 1359 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
duke@435 1360 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
jcoomes@700 1361 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1362 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
duke@435 1363 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
duke@435 1364 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
jcoomes@810 1365 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
duke@435 1366 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
duke@435 1367 id, space->capacity_in_words(), dense_prefix_end,
jcoomes@810 1368 dp_region, dp_words / region_size,
jcoomes@810 1369 cr_words / region_size, new_top);
duke@435 1370 }
duke@435 1371 }
duke@435 1372
duke@435 1373 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
duke@435 1374 bool maximum_compaction)
duke@435 1375 {
duke@435 1376 EventMark m("2 summarize");
duke@435 1377 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
duke@435 1378 // trace("2");
duke@435 1379
duke@435 1380 #ifdef ASSERT
duke@435 1381 if (TraceParallelOldGCMarkingPhase) {
duke@435 1382 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
duke@435 1383 "add_obj_bytes=" SIZE_FORMAT,
duke@435 1384 add_obj_count, add_obj_size * HeapWordSize);
duke@435 1385 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
duke@435 1386 "mark_bitmap_bytes=" SIZE_FORMAT,
duke@435 1387 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
duke@435 1388 }
duke@435 1389 #endif // #ifdef ASSERT
duke@435 1390
duke@435 1391 // Quick summarization of each space into itself, to see how much is live.
duke@435 1392 summarize_spaces_quick();
duke@435 1393
duke@435 1394 if (TraceParallelOldGCSummaryPhase) {
duke@435 1395 tty->print_cr("summary_phase: after summarizing each space to self");
duke@435 1396 Universe::print();
jcoomes@810 1397 NOT_PRODUCT(print_region_ranges());
duke@435 1398 if (Verbose) {
duke@435 1399 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
duke@435 1400 }
duke@435 1401 }
duke@435 1402
duke@435 1403 // The amount of live data that will end up in old space (assuming it fits).
duke@435 1404 size_t old_space_total_live = 0;
duke@435 1405 unsigned int id;
duke@435 1406 for (id = old_space_id; id < last_space_id; ++id) {
duke@435 1407 old_space_total_live += pointer_delta(_space_info[id].new_top(),
duke@435 1408 _space_info[id].space()->bottom());
duke@435 1409 }
duke@435 1410
duke@435 1411 const MutableSpace* old_space = _space_info[old_space_id].space();
duke@435 1412 if (old_space_total_live > old_space->capacity_in_words()) {
duke@435 1413 // XXX - should also try to expand
duke@435 1414 maximum_compaction = true;
duke@435 1415 } else if (!UseParallelOldGCDensePrefix) {
duke@435 1416 maximum_compaction = true;
duke@435 1417 }
duke@435 1418
duke@435 1419 // Permanent and Old generations.
duke@435 1420 summarize_space(perm_space_id, maximum_compaction);
duke@435 1421 summarize_space(old_space_id, maximum_compaction);
duke@435 1422
duke@435 1423 // Summarize the remaining spaces (those in the young gen) into old space. If
duke@435 1424 // the live data from a space doesn't fit, the existing summarization is left
duke@435 1425 // intact, so the data is compacted down within the space itself.
duke@435 1426 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
duke@435 1427 HeapWord* const target_space_end = old_space->end();
duke@435 1428 for (id = eden_space_id; id < last_space_id; ++id) {
duke@435 1429 const MutableSpace* space = _space_info[id].space();
duke@435 1430 const size_t live = pointer_delta(_space_info[id].new_top(),
duke@435 1431 space->bottom());
duke@435 1432 const size_t available = pointer_delta(target_space_end, *new_top_addr);
jcoomes@701 1433 if (live > 0 && live <= available) {
duke@435 1434 // All the live data will fit.
duke@435 1435 if (TraceParallelOldGCSummaryPhase) {
duke@435 1436 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
duke@435 1437 id, *new_top_addr);
duke@435 1438 }
duke@435 1439 _summary_data.summarize(*new_top_addr, target_space_end,
duke@435 1440 space->bottom(), space->top(),
duke@435 1441 new_top_addr);
duke@435 1442
jcoomes@810 1443 // Clear the source_region field for each region in the space.
jcoomes@701 1444 HeapWord* const new_top = _space_info[id].new_top();
jcoomes@810 1445 HeapWord* const clear_end = _summary_data.region_align_up(new_top);
jcoomes@810 1446 RegionData* beg_region =
jcoomes@810 1447 _summary_data.addr_to_region_ptr(space->bottom());
jcoomes@810 1448 RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
jcoomes@810 1449 while (beg_region < end_region) {
jcoomes@810 1450 beg_region->set_source_region(0);
jcoomes@810 1451 ++beg_region;
duke@435 1452 }
jcoomes@701 1453
jcoomes@701 1454 // Reset the new_top value for the space.
jcoomes@701 1455 _space_info[id].set_new_top(space->bottom());
duke@435 1456 }
duke@435 1457 }
duke@435 1458
duke@435 1459 if (TraceParallelOldGCSummaryPhase) {
duke@435 1460 tty->print_cr("summary_phase: after final summarization");
duke@435 1461 Universe::print();
jcoomes@810 1462 NOT_PRODUCT(print_region_ranges());
duke@435 1463 if (Verbose) {
duke@435 1464 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
duke@435 1465 }
duke@435 1466 }
duke@435 1467 }
duke@435 1468
duke@435 1469 // This method should contain all heap-specific policy for invoking a full
duke@435 1470 // collection. invoke_no_policy() will only attempt to compact the heap; it
duke@435 1471 // will do nothing further. If we need to bail out for policy reasons, scavenge
duke@435 1472 // before full gc, or any other specialized behavior, it needs to be added here.
duke@435 1473 //
duke@435 1474 // Note that this method should only be called from the vm_thread while at a
duke@435 1475 // safepoint.
duke@435 1476 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
duke@435 1477 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 1478 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
duke@435 1479 "should be in vm thread");
duke@435 1480 ParallelScavengeHeap* heap = gc_heap();
duke@435 1481 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1482 assert(!heap->is_gc_active(), "not reentrant");
duke@435 1483
duke@435 1484 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@435 1485
duke@435 1486 // Before each allocation/collection attempt, find out from the
duke@435 1487 // policy object if GCs are, on the whole, taking too long. If so,
duke@435 1488 // bail out without attempting a collection. The exceptions are
duke@435 1489 // for explicitly requested GCs.
duke@435 1490 if (!policy->gc_time_limit_exceeded() ||
duke@435 1491 GCCause::is_user_requested_gc(gc_cause) ||
duke@435 1492 GCCause::is_serviceability_requested_gc(gc_cause)) {
duke@435 1493 IsGCActiveMark mark;
duke@435 1494
duke@435 1495 if (ScavengeBeforeFullGC) {
duke@435 1496 PSScavenge::invoke_no_policy();
duke@435 1497 }
duke@435 1498
duke@435 1499 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
duke@435 1500 }
duke@435 1501 }
duke@435 1502
jcoomes@810 1503 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
jcoomes@810 1504 size_t addr_region_index = addr_to_region_idx(addr);
jcoomes@810 1505 return region_index == addr_region_index;
duke@435 1506 }
duke@435 1507
duke@435 1508 // This method contains no policy. You should probably
duke@435 1509 // be calling invoke() instead.
duke@435 1510 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
duke@435 1511 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
duke@435 1512 assert(ref_processor() != NULL, "Sanity");
duke@435 1513
apetrusenko@574 1514 if (GC_locker::check_active_before_gc()) {
duke@435 1515 return;
duke@435 1516 }
duke@435 1517
duke@435 1518 TimeStamp marking_start;
duke@435 1519 TimeStamp compaction_start;
duke@435 1520 TimeStamp collection_exit;
duke@435 1521
duke@435 1522 ParallelScavengeHeap* heap = gc_heap();
duke@435 1523 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 1524 PSYoungGen* young_gen = heap->young_gen();
duke@435 1525 PSOldGen* old_gen = heap->old_gen();
duke@435 1526 PSPermGen* perm_gen = heap->perm_gen();
duke@435 1527 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@435 1528
jmasa@698 1529 if (ZapUnusedHeapArea) {
jmasa@698 1530 // Save information needed to minimize mangling
jmasa@698 1531 heap->record_gen_tops_before_GC();
jmasa@698 1532 }
jmasa@698 1533
duke@435 1534 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
duke@435 1535
duke@435 1536 // Make sure data structures are sane, make the heap parsable, and do other
duke@435 1537 // miscellaneous bookkeeping.
duke@435 1538 PreGCValues pre_gc_values;
duke@435 1539 pre_compact(&pre_gc_values);
duke@435 1540
jcoomes@645 1541 // Get the compaction manager reserved for the VM thread.
jcoomes@645 1542 ParCompactionManager* const vmthread_cm =
jcoomes@645 1543 ParCompactionManager::manager_array(gc_task_manager()->workers());
jcoomes@645 1544
duke@435 1545 // Place after pre_compact() where the number of invocations is incremented.
duke@435 1546 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@435 1547
duke@435 1548 {
duke@435 1549 ResourceMark rm;
duke@435 1550 HandleMark hm;
duke@435 1551
duke@435 1552 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
duke@435 1553
duke@435 1554 // This is useful for debugging, but don't change the output the
duke@435 1555 // customer sees.
duke@435 1556 const char* gc_cause_str = "Full GC";
duke@435 1557 if (is_system_gc && PrintGCDetails) {
duke@435 1558 gc_cause_str = "Full GC (System)";
duke@435 1559 }
duke@435 1560 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 1561 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 1562 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
duke@435 1563 TraceCollectorStats tcs(counters());
duke@435 1564 TraceMemoryManagerStats tms(true /* Full GC */);
duke@435 1565
duke@435 1566 if (TraceGen1Time) accumulated_time()->start();
duke@435 1567
duke@435 1568 // Let the size policy know we're starting
duke@435 1569 size_policy->major_collection_begin();
duke@435 1570
duke@435 1571 // When collecting the permanent generation, methodOops may be moving,
duke@435 1572 // so we either have to flush all bcp data or convert it into bci.
duke@435 1573 CodeCache::gc_prologue();
duke@435 1574 Threads::gc_prologue();
duke@435 1575
duke@435 1576 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 1577 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 1578
duke@435 1579 ref_processor()->enable_discovery();
ysr@892 1580 ref_processor()->setup_policy(maximum_heap_compaction);
duke@435 1581
duke@435 1582 bool marked_for_unloading = false;
duke@435 1583
duke@435 1584 marking_start.update();
jcoomes@645 1585 marking_phase(vmthread_cm, maximum_heap_compaction);
duke@435 1586
duke@435 1587 #ifndef PRODUCT
duke@435 1588 if (TraceParallelOldGCMarkingPhase) {
duke@435 1589 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
duke@435 1590 "cas_by_another %d",
duke@435 1591 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
duke@435 1592 mark_bitmap()->cas_by_another());
duke@435 1593 }
duke@435 1594 #endif // #ifndef PRODUCT
duke@435 1595
duke@435 1596 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
jcoomes@645 1597 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
duke@435 1598
duke@435 1599 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
duke@435 1600 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
duke@435 1601
duke@435 1602 // adjust_roots() updates Universe::_intArrayKlassObj which is
duke@435 1603 // needed by the compaction for filling holes in the dense prefix.
duke@435 1604 adjust_roots();
duke@435 1605
duke@435 1606 compaction_start.update();
duke@435 1607 // Does the perm gen always have to be done serially because
duke@435 1608 // klasses are used in the update of an object?
jcoomes@645 1609 compact_perm(vmthread_cm);
duke@435 1610
duke@435 1611 if (UseParallelOldGCCompacting) {
duke@435 1612 compact();
duke@435 1613 } else {
jcoomes@645 1614 compact_serial(vmthread_cm);
duke@435 1615 }
duke@435 1616
duke@435 1617 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
duke@435 1618 // done before resizing.
duke@435 1619 post_compact();
duke@435 1620
duke@435 1621 // Let the size policy know we're done
duke@435 1622 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
duke@435 1623
duke@435 1624 if (UseAdaptiveSizePolicy) {
duke@435 1625 if (PrintAdaptiveSizePolicy) {
duke@435 1626 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@435 1627 gclog_or_tty->stamp();
duke@435 1628 gclog_or_tty->print_cr(" collection: %d ",
duke@435 1629 heap->total_collections());
duke@435 1630 if (Verbose) {
duke@435 1631 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
duke@435 1632 " perm_gen_capacity: %d ",
duke@435 1633 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@435 1634 perm_gen->capacity_in_bytes());
duke@435 1635 }
duke@435 1636 }
duke@435 1637
duke@435 1638 // Don't check if the size_policy is ready here. Let
duke@435 1639 // the size_policy check that internally.
duke@435 1640 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
duke@435 1641 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@435 1642 UseAdaptiveSizePolicyWithSystemGC)) {
duke@435 1643 // Calculate optimal free space amounts
duke@435 1644 assert(young_gen->max_size() >
duke@435 1645 young_gen->from_space()->capacity_in_bytes() +
duke@435 1646 young_gen->to_space()->capacity_in_bytes(),
duke@435 1647 "Sizes of space in young gen are out-of-bounds");
duke@435 1648 size_t max_eden_size = young_gen->max_size() -
duke@435 1649 young_gen->from_space()->capacity_in_bytes() -
duke@435 1650 young_gen->to_space()->capacity_in_bytes();
jmasa@698 1651 size_policy->compute_generation_free_space(
jmasa@698 1652 young_gen->used_in_bytes(),
jmasa@698 1653 young_gen->eden_space()->used_in_bytes(),
jmasa@698 1654 old_gen->used_in_bytes(),
jmasa@698 1655 perm_gen->used_in_bytes(),
jmasa@698 1656 young_gen->eden_space()->capacity_in_bytes(),
jmasa@698 1657 old_gen->max_gen_size(),
jmasa@698 1658 max_eden_size,
jmasa@698 1659 true /* full gc*/,
jmasa@698 1660 gc_cause);
jmasa@698 1661
jmasa@698 1662 heap->resize_old_gen(
jmasa@698 1663 size_policy->calculated_old_free_size_in_bytes());
duke@435 1664
duke@435 1665 // Don't resize the young generation at a major collection. A
duke@435 1666 // desired young generation size may have been calculated but
duke@435 1667 // resizing the young generation complicates the code because the
duke@435 1668 // resizing of the old generation may have moved the boundary
duke@435 1669 // between the young generation and the old generation. Let the
duke@435 1670 // young generation resizing happen at the minor collections.
duke@435 1671 }
duke@435 1672 if (PrintAdaptiveSizePolicy) {
duke@435 1673 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@435 1674 heap->total_collections());
duke@435 1675 }
duke@435 1676 }
duke@435 1677
duke@435 1678 if (UsePerfData) {
duke@435 1679 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
duke@435 1680 counters->update_counters();
duke@435 1681 counters->update_old_capacity(old_gen->capacity_in_bytes());
duke@435 1682 counters->update_young_capacity(young_gen->capacity_in_bytes());
duke@435 1683 }
duke@435 1684
duke@435 1685 heap->resize_all_tlabs();
duke@435 1686
duke@435 1687 // We collected the perm gen, so we'll resize it here.
duke@435 1688 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
duke@435 1689
duke@435 1690 if (TraceGen1Time) accumulated_time()->stop();
duke@435 1691
duke@435 1692 if (PrintGC) {
duke@435 1693 if (PrintGCDetails) {
duke@435 1694 // No GC timestamp here. This is after GC so it would be confusing.
duke@435 1695 young_gen->print_used_change(pre_gc_values.young_gen_used());
duke@435 1696 old_gen->print_used_change(pre_gc_values.old_gen_used());
duke@435 1697 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 1698 // Print perm gen last (print_heap_change() excludes the perm gen).
duke@435 1699 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
duke@435 1700 } else {
duke@435 1701 heap->print_heap_change(pre_gc_values.heap_used());
duke@435 1702 }
duke@435 1703 }
duke@435 1704
duke@435 1705 // Track memory usage and detect low memory
duke@435 1706 MemoryService::track_memory_usage();
duke@435 1707 heap->update_counters();
duke@435 1708
duke@435 1709 if (PrintGCDetails) {
duke@435 1710 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
duke@435 1711 if (size_policy->gc_time_limit_exceeded()) {
duke@435 1712 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
duke@435 1713 "of %d%%", GCTimeLimit);
duke@435 1714 } else {
duke@435 1715 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
duke@435 1716 "of %d%%", GCTimeLimit);
duke@435 1717 }
duke@435 1718 }
duke@435 1719 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
duke@435 1720 }
duke@435 1721 }
duke@435 1722
duke@435 1723 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 1724 HandleMark hm; // Discard invalid handles created during verification
duke@435 1725 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 1726 Universe::verify(false);
duke@435 1727 }
duke@435 1728
duke@435 1729 // Re-verify object start arrays
duke@435 1730 if (VerifyObjectStartArray &&
duke@435 1731 VerifyAfterGC) {
duke@435 1732 old_gen->verify_object_start_array();
duke@435 1733 perm_gen->verify_object_start_array();
duke@435 1734 }
duke@435 1735
jmasa@698 1736 if (ZapUnusedHeapArea) {
jmasa@698 1737 old_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 1738 perm_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 1739 }
jmasa@698 1740
duke@435 1741 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 1742
duke@435 1743 collection_exit.update();
duke@435 1744
duke@435 1745 if (PrintHeapAtGC) {
duke@435 1746 Universe::print_heap_after_gc();
duke@435 1747 }
duke@435 1748 if (PrintGCTaskTimeStamps) {
duke@435 1749 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
duke@435 1750 INT64_FORMAT,
duke@435 1751 marking_start.ticks(), compaction_start.ticks(),
duke@435 1752 collection_exit.ticks());
duke@435 1753 gc_task_manager()->print_task_time_stamps();
duke@435 1754 }
duke@435 1755 }
duke@435 1756
duke@435 1757 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 1758 PSYoungGen* young_gen,
duke@435 1759 PSOldGen* old_gen) {
duke@435 1760 MutableSpace* const eden_space = young_gen->eden_space();
duke@435 1761 assert(!eden_space->is_empty(), "eden must be non-empty");
duke@435 1762 assert(young_gen->virtual_space()->alignment() ==
duke@435 1763 old_gen->virtual_space()->alignment(), "alignments do not match");
duke@435 1764
duke@435 1765 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
duke@435 1766 return false;
duke@435 1767 }
duke@435 1768
duke@435 1769 // Both generations must be completely committed.
duke@435 1770 if (young_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 1771 return false;
duke@435 1772 }
duke@435 1773 if (old_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 1774 return false;
duke@435 1775 }
duke@435 1776
duke@435 1777 // Figure out how much to take from eden. Include the average amount promoted
duke@435 1778 // in the total; otherwise the next young gen GC will simply bail out to a
duke@435 1779 // full GC.
duke@435 1780 const size_t alignment = old_gen->virtual_space()->alignment();
duke@435 1781 const size_t eden_used = eden_space->used_in_bytes();
duke@435 1782 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
duke@435 1783 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
duke@435 1784 const size_t eden_capacity = eden_space->capacity_in_bytes();
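  // Illustrative numbers: 64M used in eden plus an 8M padded promotion average,
  // aligned up to a 64K generation alignment, gives absorb_size = 72M; the
  // checks below reject the boundary move if that is >= eden's capacity or
  // would shrink the young gen below its minimum size.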
duke@435 1785
duke@435 1786 if (absorb_size >= eden_capacity) {
duke@435 1787 return false; // Must leave some space in eden.
duke@435 1788 }
duke@435 1789
duke@435 1790 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
duke@435 1791 if (new_young_size < young_gen->min_gen_size()) {
duke@435 1792 return false; // Respect young gen minimum size.
duke@435 1793 }
duke@435 1794
duke@435 1795 if (TraceAdaptiveGCBoundary && Verbose) {
duke@435 1796 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
duke@435 1797 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
duke@435 1798 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
duke@435 1799 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
duke@435 1800 absorb_size / K,
duke@435 1801 eden_capacity / K, (eden_capacity - absorb_size) / K,
duke@435 1802 young_gen->from_space()->used_in_bytes() / K,
duke@435 1803 young_gen->to_space()->used_in_bytes() / K,
duke@435 1804 young_gen->capacity_in_bytes() / K, new_young_size / K);
duke@435 1805 }
duke@435 1806
duke@435 1807 // Fill the unused part of the old gen.
duke@435 1808 MutableSpace* const old_space = old_gen->object_space();
jcoomes@916 1809 HeapWord* const unused_start = old_space->top();
jcoomes@916 1810 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
jcoomes@916 1811
jcoomes@916 1812 if (unused_words > 0) {
jcoomes@916 1813 if (unused_words < CollectedHeap::min_fill_size()) {
jcoomes@916 1814 return false; // If the old gen cannot be filled, must give up.
jcoomes@916 1815 }
jcoomes@916 1816 CollectedHeap::fill_with_objects(unused_start, unused_words);
duke@435 1817 }
duke@435 1818
duke@435 1819 // Take the live data from eden and set both top and end in the old gen to
duke@435 1820 // eden top. (Need to set end because reset_after_change() mangles the region
duke@435 1821 // from end to virtual_space->high() in debug builds).
duke@435 1822 HeapWord* const new_top = eden_space->top();
duke@435 1823 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
duke@435 1824 absorb_size);
duke@435 1825 young_gen->reset_after_change();
duke@435 1826 old_space->set_top(new_top);
duke@435 1827 old_space->set_end(new_top);
duke@435 1828 old_gen->reset_after_change();
duke@435 1829
duke@435 1830 // Update the object start array for the filler object and the data from eden.
duke@435 1831 ObjectStartArray* const start_array = old_gen->start_array();
jcoomes@916 1832 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
jcoomes@916 1833 start_array->allocate_block(p);
duke@435 1834 }
duke@435 1835
duke@435 1836 // Could update the promoted average here, but it is not typically updated at
duke@435 1837 // full GCs and the value to use is unclear. Something like
duke@435 1838 //
duke@435 1839 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
duke@435 1840
duke@435 1841 size_policy->set_bytes_absorbed_from_eden(absorb_size);
duke@435 1842 return true;
duke@435 1843 }
duke@435 1844
duke@435 1845 GCTaskManager* const PSParallelCompact::gc_task_manager() {
duke@435 1846 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
duke@435 1847 "shouldn't return NULL");
duke@435 1848 return ParallelScavengeHeap::gc_task_manager();
duke@435 1849 }
duke@435 1850
duke@435 1851 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
duke@435 1852 bool maximum_heap_compaction) {
duke@435 1853 // Recursively traverse all live objects and mark them
duke@435 1854 EventMark m("1 mark object");
duke@435 1855 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
duke@435 1856
duke@435 1857 ParallelScavengeHeap* heap = gc_heap();
duke@435 1858 uint parallel_gc_threads = heap->gc_task_manager()->workers();
jcoomes@810 1859 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
duke@435 1860 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 1861
duke@435 1862 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 1863 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
duke@435 1864
duke@435 1865 {
duke@435 1866 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
duke@435 1867
duke@435 1868 GCTaskQueue* q = GCTaskQueue::create();
duke@435 1869
duke@435 1870 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
duke@435 1871 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
duke@435 1872 // We scan the thread roots in parallel
duke@435 1873 Threads::create_thread_roots_marking_tasks(q);
duke@435 1874 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
duke@435 1875 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
duke@435 1876 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
duke@435 1877 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
duke@435 1878 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
duke@435 1879 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
duke@435 1880
duke@435 1881 if (parallel_gc_threads > 1) {
duke@435 1882 for (uint j = 0; j < parallel_gc_threads; j++) {
duke@435 1883 q->enqueue(new StealMarkingTask(&terminator));
duke@435 1884 }
duke@435 1885 }
duke@435 1886
duke@435 1887 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 1888 q->enqueue(fin);
duke@435 1889
duke@435 1890 gc_task_manager()->add_list(q);
duke@435 1891
duke@435 1892 fin->wait_for();
duke@435 1893
duke@435 1894 // We have to release the barrier tasks!
duke@435 1895 WaitForBarrierGCTask::destroy(fin);
duke@435 1896 }
duke@435 1897
duke@435 1898 // Process reference objects found during marking
duke@435 1899 {
duke@435 1900 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
duke@435 1901 if (ref_processor()->processing_is_mt()) {
duke@435 1902 RefProcTaskExecutor task_executor;
duke@435 1903 ref_processor()->process_discovered_references(
ysr@888 1904 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
ysr@888 1905 &task_executor);
duke@435 1906 } else {
duke@435 1907 ref_processor()->process_discovered_references(
ysr@888 1908 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
duke@435 1909 }
duke@435 1910 }
duke@435 1911
duke@435 1912 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
duke@435 1913 // Follow system dictionary roots and unload classes.
duke@435 1914 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
duke@435 1915
duke@435 1916 // Follow code cache roots.
duke@435 1917 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
duke@435 1918 purged_class);
duke@435 1919 follow_stack(cm); // Flush marking stack.
duke@435 1920
duke@435 1921 // Update subklass/sibling/implementor links of live klasses
duke@435 1922 // revisit_klass_stack is used in follow_weak_klass_links().
duke@435 1923 follow_weak_klass_links(cm);
duke@435 1924
duke@435 1925 // Visit symbol and interned string tables and delete unmarked oops
duke@435 1926 SymbolTable::unlink(is_alive_closure());
duke@435 1927 StringTable::unlink(is_alive_closure());
duke@435 1928
duke@435 1929 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
duke@435 1930 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
duke@435 1931 }
duke@435 1932
duke@435 1933 // This should be moved to the shared markSweep code!
duke@435 1934 class PSAlwaysTrueClosure: public BoolObjectClosure {
duke@435 1935 public:
duke@435 1936 void do_object(oop p) { ShouldNotReachHere(); }
duke@435 1937 bool do_object_b(oop p) { return true; }
duke@435 1938 };
duke@435 1939 static PSAlwaysTrueClosure always_true;
duke@435 1940
duke@435 1941 void PSParallelCompact::adjust_roots() {
duke@435 1942 // Adjust the pointers to reflect the new locations
duke@435 1943 EventMark m("3 adjust roots");
duke@435 1944 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
duke@435 1945
duke@435 1946 // General strong roots.
duke@435 1947 Universe::oops_do(adjust_root_pointer_closure());
duke@435 1948 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
duke@435 1949 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
duke@435 1950 Threads::oops_do(adjust_root_pointer_closure());
duke@435 1951 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
duke@435 1952 FlatProfiler::oops_do(adjust_root_pointer_closure());
duke@435 1953 Management::oops_do(adjust_root_pointer_closure());
duke@435 1954 JvmtiExport::oops_do(adjust_root_pointer_closure());
duke@435 1955 // SO_AllClasses
duke@435 1956 SystemDictionary::oops_do(adjust_root_pointer_closure());
duke@435 1957 vmSymbols::oops_do(adjust_root_pointer_closure());
duke@435 1958
duke@435 1959 // Now adjust pointers in remaining weak roots. (All of which should
duke@435 1960 // have been cleared if they pointed to non-surviving objects.)
duke@435 1961 // Global (weak) JNI handles
duke@435 1962 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
duke@435 1963
duke@435 1964 CodeCache::oops_do(adjust_pointer_closure());
duke@435 1965 SymbolTable::oops_do(adjust_root_pointer_closure());
duke@435 1966 StringTable::oops_do(adjust_root_pointer_closure());
duke@435 1967 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 1968 // Roots were visited so references into the young gen in roots
duke@435 1969 // may have been scanned. Process them also.
duke@435 1970 // Should the reference processor have a span that excludes
duke@435 1971 // young gen objects?
duke@435 1972 PSScavenge::reference_processor()->weak_oops_do(
duke@435 1973 adjust_root_pointer_closure());
duke@435 1974 }
duke@435 1975
duke@435 1976 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
duke@435 1977 EventMark m("4 compact perm");
duke@435 1978 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
duke@435 1979 // trace("4");
duke@435 1980
duke@435 1981 gc_heap()->perm_gen()->start_array()->reset();
duke@435 1982 move_and_update(cm, perm_space_id);
duke@435 1983 }
duke@435 1984
jcoomes@810 1985 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
jcoomes@810 1986 uint parallel_gc_threads)
jcoomes@810 1987 {
duke@435 1988 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
duke@435 1989
duke@435 1990 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
duke@435 1991 for (unsigned int j = 0; j < task_count; j++) {
duke@435 1992 q->enqueue(new DrainStacksCompactionTask());
duke@435 1993 }
duke@435 1994
jcoomes@810 1995 // Find all regions that are available (can be filled immediately) and
duke@435 1996 // distribute them to the thread stacks. The iteration is done in reverse
jcoomes@810 1997 // order (high to low) so the regions will be removed in ascending order.
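// (Each claimed region index is pushed onto a worker's region stack, so pushing
// from high to low means the regions are popped, and thus filled, low to high.)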
duke@435 1998
duke@435 1999 const ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2000
jcoomes@810 2001 size_t fillable_regions = 0; // A count for diagnostic purposes.
duke@435 2002 unsigned int which = 0; // The worker thread number.
duke@435 2003
duke@435 2004 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
duke@435 2005 SpaceInfo* const space_info = _space_info + id;
duke@435 2006 MutableSpace* const space = space_info->space();
duke@435 2007 HeapWord* const new_top = space_info->new_top();
duke@435 2008
jcoomes@810 2009 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
jcoomes@810 2010 const size_t end_region =
jcoomes@810 2011 sd.addr_to_region_idx(sd.region_align_up(new_top));
jcoomes@810 2012 assert(end_region > 0, "perm gen cannot be empty");
jcoomes@810 2013
jcoomes@810 2014 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
jcoomes@810 2015 if (sd.region(cur)->claim_unsafe()) {
duke@435 2016 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
duke@435 2017 cm->save_for_processing(cur);
duke@435 2018
duke@435 2019 if (TraceParallelOldGCCompactionPhase && Verbose) {
jcoomes@810 2020 const size_t count_mod_8 = fillable_regions & 7;
duke@435 2021 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
jcoomes@699 2022 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
duke@435 2023 if (count_mod_8 == 7) gclog_or_tty->cr();
duke@435 2024 }
duke@435 2025
jcoomes@810 2026 NOT_PRODUCT(++fillable_regions;)
jcoomes@810 2027
jcoomes@810 2028 // Assign regions to threads in round-robin fashion.
duke@435 2029 if (++which == task_count) {
duke@435 2030 which = 0;
duke@435 2031 }
duke@435 2032 }
duke@435 2033 }
duke@435 2034 }
duke@435 2035
duke@435 2036 if (TraceParallelOldGCCompactionPhase) {
jcoomes@810 2037 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
jcoomes@810 2038 gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
duke@435 2039 }
duke@435 2040 }
duke@435 2041
duke@435 2042 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
duke@435 2043
duke@435 2044 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 2045 uint parallel_gc_threads) {
duke@435 2046 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
duke@435 2047
duke@435 2048 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2049
duke@435 2050 // Iterate over all the spaces adding tasks for updating
jcoomes@810 2051 // regions in the dense prefix. Assume that 1 gc thread
duke@435 2052 // will work on opening the gaps and the remaining gc threads
duke@435 2053 // will work on the dense prefix.
duke@435 2054 SpaceId space_id = old_space_id;
duke@435 2055 while (space_id != last_space_id) {
duke@435 2056 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
duke@435 2057 const MutableSpace* const space = _space_info[space_id].space();
duke@435 2058
duke@435 2059 if (dense_prefix_end == space->bottom()) {
duke@435 2060 // There is no dense prefix for this space.
duke@435 2061 space_id = next_compaction_space_id(space_id);
duke@435 2062 continue;
duke@435 2063 }
duke@435 2064
jcoomes@810 2065 // The dense prefix is before this region.
jcoomes@810 2066 size_t region_index_end_dense_prefix =
jcoomes@810 2067 sd.addr_to_region_idx(dense_prefix_end);
jcoomes@810 2068 RegionData* const dense_prefix_cp =
jcoomes@810 2069 sd.region(region_index_end_dense_prefix);
duke@435 2070 assert(dense_prefix_end == space->end() ||
duke@435 2071 dense_prefix_cp->available() ||
duke@435 2072 dense_prefix_cp->claimed(),
jcoomes@810 2073 "The region after the dense prefix should always be ready to fill");
jcoomes@810 2074
jcoomes@810 2075 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
duke@435 2076
duke@435 2077 // Is there dense prefix work?
jcoomes@810 2078 size_t total_dense_prefix_regions =
jcoomes@810 2079 region_index_end_dense_prefix - region_index_start;
jcoomes@810 2080 // How many regions of the dense prefix should be given to
duke@435 2081 // each thread?
jcoomes@810 2082 if (total_dense_prefix_regions > 0) {
duke@435 2083 uint tasks_for_dense_prefix = 1;
duke@435 2084 if (UseParallelDensePrefixUpdate) {
jcoomes@810 2085 if (total_dense_prefix_regions <=
duke@435 2086 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
duke@435 2087 // Don't over partition. This assumes that
duke@435 2088 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
jcoomes@810 2089 // so there are not many regions to process.
duke@435 2090 tasks_for_dense_prefix = parallel_gc_threads;
duke@435 2091 } else {
duke@435 2092 // Over partition
duke@435 2093 tasks_for_dense_prefix = parallel_gc_threads *
duke@435 2094 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
duke@435 2095 }
duke@435 2096 }
jcoomes@810 2097 size_t regions_per_thread = total_dense_prefix_regions /
duke@435 2098 tasks_for_dense_prefix;
jcoomes@810 2099 // Give each thread at least 1 region.
jcoomes@810 2100 if (regions_per_thread == 0) {
jcoomes@810 2101 regions_per_thread = 1;
duke@435 2102 }
duke@435 2103
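      // Worked example (assumed numbers, with UseParallelDensePrefixUpdate on):
      // 100 dense prefix regions, 4 GC threads and an over-partitioning factor
      // of 4 give 16 tasks of 100 / 16 = 6 regions each; the 4 leftover regions
      // are handled by the tail task enqueued after this loop.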
duke@435 2104 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
jcoomes@810 2105 if (region_index_start >= region_index_end_dense_prefix) {
duke@435 2106 break;
duke@435 2107 }
jcoomes@810 2108 // region_index_end is not processed
jcoomes@810 2109 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
jcoomes@810 2110 region_index_end_dense_prefix);
duke@435 2111 q->enqueue(new UpdateDensePrefixTask(
duke@435 2112 space_id,
jcoomes@810 2113 region_index_start,
jcoomes@810 2114 region_index_end));
jcoomes@810 2115 region_index_start = region_index_end;
duke@435 2116 }
duke@435 2117 }
duke@435 2118 // This gets any part of the dense prefix that did not
duke@435 2119 // fit evenly.
jcoomes@810 2120 if (region_index_start < region_index_end_dense_prefix) {
duke@435 2121 q->enqueue(new UpdateDensePrefixTask(
duke@435 2122 space_id,
jcoomes@810 2123 region_index_start,
jcoomes@810 2124 region_index_end_dense_prefix));
duke@435 2125 }
duke@435 2126 space_id = next_compaction_space_id(space_id);
duke@435 2127 } // End tasks for dense prefix
duke@435 2128 }
duke@435 2129
jcoomes@810 2130 void PSParallelCompact::enqueue_region_stealing_tasks(
duke@435 2131 GCTaskQueue* q,
duke@435 2132 ParallelTaskTerminator* terminator_ptr,
duke@435 2133 uint parallel_gc_threads) {
duke@435 2134 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
duke@435 2135
jcoomes@810 2136 // Once a thread has drained its stack, it should try to steal regions from
duke@435 2137 // other threads.
duke@435 2138 if (parallel_gc_threads > 1) {
duke@435 2139 for (uint j = 0; j < parallel_gc_threads; j++) {
jcoomes@810 2140 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
duke@435 2141 }
duke@435 2142 }
duke@435 2143 }
duke@435 2144
duke@435 2145 void PSParallelCompact::compact() {
duke@435 2146 EventMark m("5 compact");
duke@435 2147 // trace("5");
duke@435 2148 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
duke@435 2149
duke@435 2150 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2151 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2152 PSOldGen* old_gen = heap->old_gen();
duke@435 2153 old_gen->start_array()->reset();
duke@435 2154 uint parallel_gc_threads = heap->gc_task_manager()->workers();
jcoomes@810 2155 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
duke@435 2156 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
duke@435 2157
duke@435 2158 GCTaskQueue* q = GCTaskQueue::create();
jcoomes@810 2159 enqueue_region_draining_tasks(q, parallel_gc_threads);
duke@435 2160 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
jcoomes@810 2161 enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
duke@435 2162
duke@435 2163 {
duke@435 2164 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
duke@435 2165
duke@435 2166 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
duke@435 2167 q->enqueue(fin);
duke@435 2168
duke@435 2169 gc_task_manager()->add_list(q);
duke@435 2170
duke@435 2171 fin->wait_for();
duke@435 2172
duke@435 2173 // We have to release the barrier tasks!
duke@435 2174 WaitForBarrierGCTask::destroy(fin);
duke@435 2175
duke@435 2176 #ifdef ASSERT
jcoomes@810 2177 // Verify that all regions have been processed before the deferred updates.
duke@435 2178 // Note that perm_space_id is skipped; this type of verification is not
jcoomes@810 2179 // valid until the perm gen is compacted by regions.
duke@435 2180 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2181 verify_complete(SpaceId(id));
duke@435 2182 }
duke@435 2183 #endif
duke@435 2184 }
duke@435 2185
duke@435 2186 {
duke@435 2187 // Update the deferred objects, if any. Any compaction manager can be used.
duke@435 2188 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
duke@435 2189 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
duke@435 2190 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
duke@435 2191 update_deferred_objects(cm, SpaceId(id));
duke@435 2192 }
duke@435 2193 }
duke@435 2194 }
duke@435 2195
duke@435 2196 #ifdef ASSERT
duke@435 2197 void PSParallelCompact::verify_complete(SpaceId space_id) {
jcoomes@810 2198 // All regions from space bottom() to new_top() should be marked as filled
jcoomes@810 2199 // and all regions between new_top() and top() should be available (i.e.,
duke@435 2200 // should have been emptied).
duke@435 2201 ParallelCompactData& sd = summary_data();
duke@435 2202 SpaceInfo si = _space_info[space_id];
jcoomes@810 2203 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
jcoomes@810 2204 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
jcoomes@810 2205 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
jcoomes@810 2206 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
jcoomes@810 2207 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
duke@435 2208
duke@435 2209 bool issued_a_warning = false;
duke@435 2210
jcoomes@810 2211 size_t cur_region;
jcoomes@810 2212 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
jcoomes@810 2213 const RegionData* const c = sd.region(cur_region);
duke@435 2214 if (!c->completed()) {
jcoomes@810 2215 warning("region " SIZE_FORMAT " not filled: "
duke@435 2216 "destination_count=" SIZE_FORMAT,
jcoomes@810 2217 cur_region, c->destination_count());
duke@435 2218 issued_a_warning = true;
duke@435 2219 }
duke@435 2220 }
duke@435 2221
jcoomes@810 2222 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
jcoomes@810 2223 const RegionData* const c = sd.region(cur_region);
duke@435 2224 if (!c->available()) {
jcoomes@810 2225 warning("region " SIZE_FORMAT " not empty: "
duke@435 2226 "destination_count=" SIZE_FORMAT,
jcoomes@810 2227 cur_region, c->destination_count());
duke@435 2228 issued_a_warning = true;
duke@435 2229 }
duke@435 2230 }
duke@435 2231
duke@435 2232 if (issued_a_warning) {
jcoomes@810 2233 print_region_ranges();
duke@435 2234 }
duke@435 2235 }
duke@435 2236 #endif // #ifdef ASSERT
duke@435 2237
duke@435 2238 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
duke@435 2239 EventMark m("5 compact serial");
duke@435 2240 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
duke@435 2241
duke@435 2242 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 2243 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 2244
duke@435 2245 PSYoungGen* young_gen = heap->young_gen();
duke@435 2246 PSOldGen* old_gen = heap->old_gen();
duke@435 2247
duke@435 2248 old_gen->start_array()->reset();
duke@435 2249 old_gen->move_and_update(cm);
duke@435 2250 young_gen->move_and_update(cm);
duke@435 2251 }
duke@435 2252
duke@435 2253
duke@435 2254 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
duke@435 2255 while (!cm->overflow_stack()->is_empty()) {
duke@435 2256 oop obj = cm->overflow_stack()->pop();
duke@435 2257 obj->follow_contents(cm);
duke@435 2258 }
duke@435 2259
duke@435 2260 oop obj;
duke@435 2261 // obj is a reference!!!
duke@435 2262 while (cm->marking_stack()->pop_local(obj)) {
duke@435 2263 // It would be nice to assert about the type of objects we might
duke@435 2264 // pop, but they can come from anywhere, unfortunately.
duke@435 2265 obj->follow_contents(cm);
duke@435 2266 }
duke@435 2267 }
duke@435 2268
duke@435 2269 void
duke@435 2270 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
duke@435 2271 // All klasses on the revisit stack are marked at this point.
duke@435 2272 // Update and follow all subklass, sibling and implementor links.
duke@435 2273 for (uint i = 0; i < ParallelGCThreads+1; i++) {
duke@435 2274 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
duke@435 2275 KeepAliveClosure keep_alive_closure(cm);
duke@435 2276 for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
duke@435 2277 cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
duke@435 2278 is_alive_closure(),
duke@435 2279 &keep_alive_closure);
duke@435 2280 }
duke@435 2281 follow_stack(cm);
duke@435 2282 }
duke@435 2283 }
duke@435 2284
duke@435 2285 void
duke@435 2286 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
duke@435 2287 cm->revisit_klass_stack()->push(k);
duke@435 2288 }
duke@435 2289
duke@435 2290 #ifdef VALIDATE_MARK_SWEEP
duke@435 2291
coleenp@548 2292 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
duke@435 2293 if (!ValidateMarkSweep)
duke@435 2294 return;
duke@435 2295
duke@435 2296 if (!isroot) {
duke@435 2297 if (_pointer_tracking) {
duke@435 2298 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
duke@435 2299 _adjusted_pointers->remove(p);
duke@435 2300 }
duke@435 2301 } else {
duke@435 2302 ptrdiff_t index = _root_refs_stack->find(p);
duke@435 2303 if (index != -1) {
duke@435 2304 int l = _root_refs_stack->length();
duke@435 2305 if (l > 0 && l - 1 != index) {
coleenp@548 2306 void* last = _root_refs_stack->pop();
duke@435 2307 assert(last != p, "should be different");
duke@435 2308 _root_refs_stack->at_put(index, last);
duke@435 2309 } else {
duke@435 2310 _root_refs_stack->remove(p);
duke@435 2311 }
duke@435 2312 }
duke@435 2313 }
duke@435 2314 }
duke@435 2315
duke@435 2316
coleenp@548 2317 void PSParallelCompact::check_adjust_pointer(void* p) {
duke@435 2318 _adjusted_pointers->push(p);
duke@435 2319 }
duke@435 2320
duke@435 2321
duke@435 2322 class AdjusterTracker: public OopClosure {
duke@435 2323 public:
duke@435 2324 AdjusterTracker() { }
coleenp@548 2325 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
coleenp@548 2326 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
duke@435 2327 };
duke@435 2328
duke@435 2329
duke@435 2330 void PSParallelCompact::track_interior_pointers(oop obj) {
duke@435 2331 if (ValidateMarkSweep) {
duke@435 2332 _adjusted_pointers->clear();
duke@435 2333 _pointer_tracking = true;
duke@435 2334
duke@435 2335 AdjusterTracker checker;
duke@435 2336 obj->oop_iterate(&checker);
duke@435 2337 }
duke@435 2338 }
duke@435 2339
duke@435 2340
duke@435 2341 void PSParallelCompact::check_interior_pointers() {
duke@435 2342 if (ValidateMarkSweep) {
duke@435 2343 _pointer_tracking = false;
duke@435 2344 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
duke@435 2345 }
duke@435 2346 }
duke@435 2347
duke@435 2348
duke@435 2349 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
duke@435 2350 if (ValidateMarkSweep) {
duke@435 2351 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
duke@435 2352 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
duke@435 2353 }
duke@435 2354 }
duke@435 2355
duke@435 2356
duke@435 2357 void PSParallelCompact::register_live_oop(oop p, size_t size) {
duke@435 2358 if (ValidateMarkSweep) {
duke@435 2359 _live_oops->push(p);
duke@435 2360 _live_oops_size->push(size);
duke@435 2361 _live_oops_index++;
duke@435 2362 }
duke@435 2363 }
duke@435 2364
duke@435 2365 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
duke@435 2366 if (ValidateMarkSweep) {
duke@435 2367 oop obj = _live_oops->at((int)_live_oops_index);
duke@435 2368 guarantee(obj == p, "should be the same object");
duke@435 2369 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
duke@435 2370 _live_oops_index++;
duke@435 2371 }
duke@435 2372 }
duke@435 2373
duke@435 2374 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
duke@435 2375 HeapWord* compaction_top) {
duke@435 2376 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
duke@435 2377 "should be moved to forwarded location");
duke@435 2378 if (ValidateMarkSweep) {
duke@435 2379 PSParallelCompact::validate_live_oop(oop(q), size);
duke@435 2380 _live_oops_moved_to->push(oop(compaction_top));
duke@435 2381 }
duke@435 2382 if (RecordMarkSweepCompaction) {
duke@435 2383 _cur_gc_live_oops->push(q);
duke@435 2384 _cur_gc_live_oops_moved_to->push(compaction_top);
duke@435 2385 _cur_gc_live_oops_size->push(size);
duke@435 2386 }
duke@435 2387 }
duke@435 2388
duke@435 2389
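// Called when compaction finishes and RecordMarkSweepCompaction is enabled:
// swap the "current GC" recording arrays with the "last GC" arrays so that
// print_new_location_of_heap_address() can consult the data from the most
// recently completed compaction while the next GC reuses the other set.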
duke@435 2390 void PSParallelCompact::compaction_complete() {
duke@435 2391 if (RecordMarkSweepCompaction) {
duke@435 2392 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
duke@435 2393 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
duke@435 2394 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
duke@435 2395
duke@435 2396 _cur_gc_live_oops = _last_gc_live_oops;
duke@435 2397 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
duke@435 2398 _cur_gc_live_oops_size = _last_gc_live_oops_size;
duke@435 2399 _last_gc_live_oops = _tmp_live_oops;
duke@435 2400 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
duke@435 2401 _last_gc_live_oops_size = _tmp_live_oops_size;
duke@435 2402 }
duke@435 2403 }
duke@435 2404
duke@435 2405
duke@435 2406 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
duke@435 2407 if (!RecordMarkSweepCompaction) {
duke@435 2408 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
duke@435 2409 return;
duke@435 2410 }
duke@435 2411
duke@435 2412 if (_last_gc_live_oops == NULL) {
duke@435 2413 tty->print_cr("No compaction information gathered yet");
duke@435 2414 return;
duke@435 2415 }
duke@435 2416
duke@435 2417 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
duke@435 2418 HeapWord* old_oop = _last_gc_live_oops->at(i);
duke@435 2419 size_t sz = _last_gc_live_oops_size->at(i);
duke@435 2420 if (old_oop <= q && q < (old_oop + sz)) {
duke@435 2421 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
duke@435 2422 size_t offset = (q - old_oop);
duke@435 2423 tty->print_cr("Address " PTR_FORMAT, q);
duke@435 2424 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
duke@435 2425 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
duke@435 2426 return;
duke@435 2427 }
duke@435 2428 }
duke@435 2429
duke@435 2430 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
duke@435 2431 }
duke@435 2432 #endif //VALIDATE_MARK_SWEEP
duke@435 2433
jcoomes@810 2434 // Update interior oops in the ranges of regions [beg_region, end_region).
duke@435 2435 void
duke@435 2436 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 2437 SpaceId space_id,
jcoomes@810 2438 size_t beg_region,
jcoomes@810 2439 size_t end_region) {
duke@435 2440 ParallelCompactData& sd = summary_data();
duke@435 2441 ParMarkBitMap* const mbm = mark_bitmap();
duke@435 2442
jcoomes@810 2443 HeapWord* beg_addr = sd.region_to_addr(beg_region);
jcoomes@810 2444 HeapWord* const end_addr = sd.region_to_addr(end_region);
jcoomes@810 2445 assert(beg_region <= end_region, "bad region range");
duke@435 2446 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
duke@435 2447
duke@435 2448 #ifdef ASSERT
jcoomes@810 2449 // Claim the regions to avoid triggering an assert when they are marked as
duke@435 2450 // filled.
jcoomes@810 2451 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
jcoomes@810 2452 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
duke@435 2453 }
duke@435 2454 #endif // #ifdef ASSERT
duke@435 2455
duke@435 2456 if (beg_addr != space(space_id)->bottom()) {
duke@435 2457 // Find the first live object or block of dead space that *starts* in this
jcoomes@810 2458 // range of regions. If a partial object crosses onto the region, skip it;
jcoomes@810 2459 // it will be marked for 'deferred update' when the object head is
jcoomes@810 2460 // processed. If dead space crosses onto the region, it is also skipped; it
jcoomes@810 2461 // will be filled when the prior region is processed. If neither of those
jcoomes@810 2462 // apply, the first word in the region is the start of a live object or dead
jcoomes@810 2463 // space.
duke@435 2464 assert(beg_addr > space(space_id)->bottom(), "sanity");
jcoomes@810 2465 const RegionData* const cp = sd.region(beg_region);
duke@435 2466 if (cp->partial_obj_size() != 0) {
jcoomes@810 2467 beg_addr = sd.partial_obj_end(beg_region);
duke@435 2468 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
duke@435 2469 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
duke@435 2470 }
duke@435 2471 }
duke@435 2472
duke@435 2473 if (beg_addr < end_addr) {
jcoomes@810 2474 // A live object or block of dead space starts in this range of regions.
duke@435 2475 HeapWord* const dense_prefix_end = dense_prefix(space_id);
duke@435 2476
duke@435 2477 // Create closures and iterate.
duke@435 2478 UpdateOnlyClosure update_closure(mbm, cm, space_id);
duke@435 2479 FillClosure fill_closure(cm, space_id);
duke@435 2480 ParMarkBitMap::IterationStatus status;
duke@435 2481 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
duke@435 2482 dense_prefix_end);
duke@435 2483 if (status == ParMarkBitMap::incomplete) {
duke@435 2484 update_closure.do_addr(update_closure.source());
duke@435 2485 }
duke@435 2486 }
duke@435 2487
jcoomes@810 2488 // Mark the regions as filled.
jcoomes@810 2489 RegionData* const beg_cp = sd.region(beg_region);
jcoomes@810 2490 RegionData* const end_cp = sd.region(end_region);
jcoomes@810 2491 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
duke@435 2492 cp->set_completed();
duke@435 2493 }
duke@435 2494 }
duke@435 2495
duke@435 2496 // Return the SpaceId for the space containing addr. If addr is not in the
duke@435 2497 // heap, last_space_id is returned. In debug mode it expects the address to be
duke@435 2498 // in the heap and asserts such.
duke@435 2499 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
duke@435 2500 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
duke@435 2501
duke@435 2502 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
duke@435 2503 if (_space_info[id].space()->contains(addr)) {
duke@435 2504 return SpaceId(id);
duke@435 2505 }
duke@435 2506 }
duke@435 2507
duke@435 2508 assert(false, "no space contains the addr");
duke@435 2509 return last_space_id;
duke@435 2510 }
duke@435 2511
duke@435 2512 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
duke@435 2513 SpaceId id) {
duke@435 2514 assert(id < last_space_id, "bad space id");
duke@435 2515
duke@435 2516 ParallelCompactData& sd = summary_data();
duke@435 2517 const SpaceInfo* const space_info = _space_info + id;
duke@435 2518 ObjectStartArray* const start_array = space_info->start_array();
duke@435 2519
duke@435 2520 const MutableSpace* const space = space_info->space();
duke@435 2521 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
duke@435 2522 HeapWord* const beg_addr = space_info->dense_prefix();
jcoomes@810 2523 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
jcoomes@810 2524
jcoomes@810 2525 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
jcoomes@810 2526 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
jcoomes@810 2527 const RegionData* cur_region;
jcoomes@810 2528 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
jcoomes@810 2529 HeapWord* const addr = cur_region->deferred_obj_addr();
duke@435 2530 if (addr != NULL) {
duke@435 2531 if (start_array != NULL) {
duke@435 2532 start_array->allocate_block(addr);
duke@435 2533 }
duke@435 2534 oop(addr)->update_contents(cm);
duke@435 2535 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
duke@435 2536 }
duke@435 2537 }
duke@435 2538 }
duke@435 2539
duke@435 2540 // Skip over count live words starting from beg, and return the address of the
duke@435 2541 // next live word. Unless marked, the word corresponding to beg is assumed to
duke@435 2542 // be dead. Callers must either ensure beg does not correspond to the middle of
duke@435 2543 // an object, or account for those live words in some other way. Callers must
duke@435 2544 // also ensure that there are enough live words in the range [beg, end) to skip.
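//
// Illustrative example (not part of the original sources): if a single live
// object occupies the eight words [beg, beg + 8), then
// skip_live_words(beg, end, 4) returns beg + 4 (still inside that object),
// while skip_live_words(beg, end, 8) returns the start of the next live
// object at or after beg + 8.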
duke@435 2545 HeapWord*
duke@435 2546 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
duke@435 2547 {
duke@435 2548 assert(count > 0, "sanity");
duke@435 2549
duke@435 2550 ParMarkBitMap* m = mark_bitmap();
duke@435 2551 idx_t bits_to_skip = m->words_to_bits(count);
duke@435 2552 idx_t cur_beg = m->addr_to_bit(beg);
duke@435 2553 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
duke@435 2554
duke@435 2555 do {
duke@435 2556 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 2557 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
duke@435 2558 const size_t obj_bits = cur_end - cur_beg + 1;
duke@435 2559 if (obj_bits > bits_to_skip) {
duke@435 2560 return m->bit_to_addr(cur_beg + bits_to_skip);
duke@435 2561 }
duke@435 2562 bits_to_skip -= obj_bits;
duke@435 2563 cur_beg = cur_end + 1;
duke@435 2564 } while (bits_to_skip > 0);
duke@435 2565
duke@435 2566 // Skipping the desired number of words landed just past the end of an object.
duke@435 2567 // Find the start of the next object.
duke@435 2568 cur_beg = m->find_obj_beg(cur_beg, search_end);
duke@435 2569 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
duke@435 2570 return m->bit_to_addr(cur_beg);
duke@435 2571 }
duke@435 2572
duke@435 2573 HeapWord*
duke@435 2574 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
jcoomes@810 2575 size_t src_region_idx)
duke@435 2576 {
duke@435 2577 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2578 const ParallelCompactData& sd = summary_data();
jcoomes@810 2579 const size_t RegionSize = ParallelCompactData::RegionSize;
jcoomes@810 2580
jcoomes@810 2581 assert(sd.is_region_aligned(dest_addr), "not aligned");
jcoomes@810 2582
jcoomes@810 2583 const RegionData* const src_region_ptr = sd.region(src_region_idx);
jcoomes@810 2584 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
jcoomes@810 2585 HeapWord* const src_region_destination = src_region_ptr->destination();
jcoomes@810 2586
jcoomes@810 2587 assert(dest_addr >= src_region_destination, "wrong src region");
jcoomes@810 2588 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
jcoomes@810 2589
jcoomes@810 2590 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
jcoomes@810 2591 HeapWord* const src_region_end = src_region_beg + RegionSize;
jcoomes@810 2592
jcoomes@810 2593 HeapWord* addr = src_region_beg;
jcoomes@810 2594 if (dest_addr == src_region_destination) {
jcoomes@810 2595 // Return the first live word in the source region.
duke@435 2596 if (partial_obj_size == 0) {
jcoomes@810 2597 addr = bitmap->find_obj_beg(addr, src_region_end);
jcoomes@810 2598 assert(addr < src_region_end, "no objects start in src region");
duke@435 2599 }
duke@435 2600 return addr;
duke@435 2601 }
duke@435 2602
duke@435 2603 // Must skip some live data.
jcoomes@810 2604 size_t words_to_skip = dest_addr - src_region_destination;
jcoomes@810 2605 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
duke@435 2606
duke@435 2607 if (partial_obj_size >= words_to_skip) {
duke@435 2608 // All the live words to skip are part of the partial object.
duke@435 2609 addr += words_to_skip;
duke@435 2610 if (partial_obj_size == words_to_skip) {
duke@435 2611 // Find the first live word past the partial object.
jcoomes@810 2612 addr = bitmap->find_obj_beg(addr, src_region_end);
jcoomes@810 2613 assert(addr < src_region_end, "wrong src region");
duke@435 2614 }
duke@435 2615 return addr;
duke@435 2616 }
duke@435 2617
duke@435 2618 // Skip over the partial object (if any).
duke@435 2619 if (partial_obj_size != 0) {
duke@435 2620 words_to_skip -= partial_obj_size;
duke@435 2621 addr += partial_obj_size;
duke@435 2622 }
duke@435 2623
jcoomes@810 2624 // Skip over live words due to objects that start in the region.
jcoomes@810 2625 addr = skip_live_words(addr, src_region_end, words_to_skip);
jcoomes@810 2626 assert(addr < src_region_end, "wrong src region");
duke@435 2627 return addr;
duke@435 2628 }
duke@435 2629
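// Decrement the destination count of each region in the range
// [beg_region, region containing end_addr). Regions that become available as
// a result and are successfully claimed here are enqueued with the compaction
// manager so that a worker can fill them.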
duke@435 2630 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
jcoomes@810 2631 size_t beg_region,
duke@435 2632 HeapWord* end_addr)
duke@435 2633 {
duke@435 2634 ParallelCompactData& sd = summary_data();
jcoomes@810 2635 RegionData* const beg = sd.region(beg_region);
jcoomes@810 2636 HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
jcoomes@810 2637 RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
jcoomes@810 2638 size_t cur_idx = beg_region;
jcoomes@810 2639 for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
jcoomes@810 2640 assert(cur->data_size() > 0, "region must have live data");
duke@435 2641 cur->decrement_destination_count();
jcoomes@810 2642 if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
duke@435 2643 cm->save_for_processing(cur_idx);
duke@435 2644 }
duke@435 2645 }
duke@435 2646 }
duke@435 2647
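// Find the next region holding live data to serve as a source for the given
// closure's destination. First look beyond end_addr in the current source
// space; if that space is exhausted, switch to the next space that does not
// compact into itself. Updates src_space_id, src_space_top and the closure's
// source address as needed, and returns the new source region index.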
jcoomes@810 2648 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
jcoomes@810 2649 SpaceId& src_space_id,
jcoomes@810 2650 HeapWord*& src_space_top,
jcoomes@810 2651 HeapWord* end_addr)
duke@435 2652 {
jcoomes@810 2653 typedef ParallelCompactData::RegionData RegionData;
duke@435 2654
duke@435 2655 ParallelCompactData& sd = PSParallelCompact::summary_data();
jcoomes@810 2656 const size_t region_size = ParallelCompactData::RegionSize;
jcoomes@810 2657
jcoomes@810 2658 size_t src_region_idx = 0;
jcoomes@810 2659
jcoomes@810 2660 // Skip empty regions (if any) up to the top of the space.
jcoomes@810 2661 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
jcoomes@810 2662 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
jcoomes@810 2663 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
jcoomes@810 2664 const RegionData* const top_region_ptr =
jcoomes@810 2665 sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 2666 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
jcoomes@810 2667 ++src_region_ptr;
duke@435 2668 }
duke@435 2669
jcoomes@810 2670 if (src_region_ptr < top_region_ptr) {
jcoomes@810 2671 // The next source region is in the current space. Update src_region_idx
jcoomes@810 2672 // and the source address to match src_region_ptr.
jcoomes@810 2673 src_region_idx = sd.region(src_region_ptr);
jcoomes@810 2674 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
jcoomes@810 2675 if (src_region_addr > closure.source()) {
jcoomes@810 2676 closure.set_source(src_region_addr);
duke@435 2677 }
jcoomes@810 2678 return src_region_idx;
duke@435 2679 }
duke@435 2680
jcoomes@810 2681 // Switch to a new source space and find the first non-empty region.
duke@435 2682 unsigned int space_id = src_space_id + 1;
duke@435 2683 assert(space_id < last_space_id, "not enough spaces");
duke@435 2684
duke@435 2685 HeapWord* const destination = closure.destination();
duke@435 2686
duke@435 2687 do {
duke@435 2688 MutableSpace* space = _space_info[space_id].space();
duke@435 2689 HeapWord* const bottom = space->bottom();
jcoomes@810 2690 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
duke@435 2691
duke@435 2692 // Iterate over the spaces that do not compact into themselves.
duke@435 2693 if (bottom_cp->destination() != bottom) {
jcoomes@810 2694 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
jcoomes@810 2695 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
jcoomes@810 2696
jcoomes@810 2697 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
duke@435 2698 if (src_cp->live_obj_size() > 0) {
duke@435 2699 // Found it.
duke@435 2700 assert(src_cp->destination() == destination,
duke@435 2701 "first live obj in the space must match the destination");
duke@435 2702 assert(src_cp->partial_obj_size() == 0,
duke@435 2703 "a space cannot begin with a partial obj");
duke@435 2704
duke@435 2705 src_space_id = SpaceId(space_id);
duke@435 2706 src_space_top = space->top();
jcoomes@810 2707 const size_t src_region_idx = sd.region(src_cp);
jcoomes@810 2708 closure.set_source(sd.region_to_addr(src_region_idx));
jcoomes@810 2709 return src_region_idx;
duke@435 2710 } else {
duke@435 2711 assert(src_cp->data_size() == 0, "sanity");
duke@435 2712 }
duke@435 2713 }
duke@435 2714 }
duke@435 2715 } while (++space_id < last_space_id);
duke@435 2716
jcoomes@810 2717 assert(false, "no source region was found");
duke@435 2718 return 0;
duke@435 2719 }
duke@435 2720
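// Fill the destination region with the given index: copy live words from one
// or more source regions, updating interior oops as each object is moved. An
// object whose tail does not fit in the region has its updates deferred and
// is finished later by update_deferred_objects(); source regions are released
// via decrement_destination_counts() as their data is consumed.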
jcoomes@810 2721 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
duke@435 2722 {
duke@435 2723 typedef ParMarkBitMap::IterationStatus IterationStatus;
jcoomes@810 2724 const size_t RegionSize = ParallelCompactData::RegionSize;
duke@435 2725 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2726 ParallelCompactData& sd = summary_data();
jcoomes@810 2727 RegionData* const region_ptr = sd.region(region_idx);
duke@435 2728
duke@435 2729 // Get the items needed to construct the closure.
jcoomes@810 2730 HeapWord* dest_addr = sd.region_to_addr(region_idx);
duke@435 2731 SpaceId dest_space_id = space_id(dest_addr);
duke@435 2732 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
duke@435 2733 HeapWord* new_top = _space_info[dest_space_id].new_top();
duke@435 2734 assert(dest_addr < new_top, "sanity");
jcoomes@810 2735 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
jcoomes@810 2736
jcoomes@810 2737 // Get the source region and related info.
jcoomes@810 2738 size_t src_region_idx = region_ptr->source_region();
jcoomes@810 2739 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
duke@435 2740 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
duke@435 2741
duke@435 2742 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
jcoomes@810 2743 closure.set_source(first_src_addr(dest_addr, src_region_idx));
jcoomes@810 2744
jcoomes@810 2745 // Adjust src_region_idx to prepare for decrementing destination counts (the
jcoomes@810 2746 // destination count is not decremented when a region is copied to itself).
jcoomes@810 2747 if (src_region_idx == region_idx) {
jcoomes@810 2748 src_region_idx += 1;
duke@435 2749 }
duke@435 2750
duke@435 2751 if (bitmap->is_unmarked(closure.source())) {
duke@435 2752 // The first source word is in the middle of an object; copy the remainder
duke@435 2753 // of the object or as much as will fit. The fact that pointer updates were
duke@435 2754 // deferred will be noted when the object header is processed.
duke@435 2755 HeapWord* const old_src_addr = closure.source();
duke@435 2756 closure.copy_partial_obj();
duke@435 2757 if (closure.is_full()) {
jcoomes@810 2758 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2759 region_ptr->set_deferred_obj_addr(NULL);
jcoomes@810 2760 region_ptr->set_completed();
duke@435 2761 return;
duke@435 2762 }
duke@435 2763
jcoomes@810 2764 HeapWord* const end_addr = sd.region_align_down(closure.source());
jcoomes@810 2765 if (sd.region_align_down(old_src_addr) != end_addr) {
jcoomes@810 2766 // The partial object was copied from more than one source region.
jcoomes@810 2767 decrement_destination_counts(cm, src_region_idx, end_addr);
jcoomes@810 2768
jcoomes@810 2769 // Move to the next source region, possibly switching spaces as well. All
duke@435 2770 // args except end_addr may be modified.
jcoomes@810 2771 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
jcoomes@810 2772 end_addr);
duke@435 2773 }
duke@435 2774 }
duke@435 2775
duke@435 2776 do {
duke@435 2777 HeapWord* const cur_addr = closure.source();
jcoomes@810 2778 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
duke@435 2779 src_space_top);
duke@435 2780 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
duke@435 2781
duke@435 2782 if (status == ParMarkBitMap::incomplete) {
jcoomes@810 2783 // The last obj that starts in the source region does not end in the
jcoomes@810 2784 // region.
duke@435 2785 assert(closure.source() < end_addr, "sanity");
duke@435 2786 HeapWord* const obj_beg = closure.source();
duke@435 2787 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
duke@435 2788 src_space_top);
duke@435 2789 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
duke@435 2790 if (obj_end < range_end) {
duke@435 2791 // The end was found; the entire object will fit.
duke@435 2792 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
duke@435 2793 assert(status != ParMarkBitMap::would_overflow, "sanity");
duke@435 2794 } else {
duke@435 2795 // The end was not found; the object will not fit.
duke@435 2796 assert(range_end < src_space_top, "obj cannot cross space boundary");
duke@435 2797 status = ParMarkBitMap::would_overflow;
duke@435 2798 }
duke@435 2799 }
duke@435 2800
duke@435 2801 if (status == ParMarkBitMap::would_overflow) {
duke@435 2802 // The last object did not fit. Note that interior oop updates were
jcoomes@810 2803 // deferred, then copy enough of the object to fill the region.
jcoomes@810 2804 region_ptr->set_deferred_obj_addr(closure.destination());
duke@435 2805 status = closure.copy_until_full(); // copies from closure.source()
duke@435 2806
jcoomes@810 2807 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2808 region_ptr->set_completed();
duke@435 2809 return;
duke@435 2810 }
duke@435 2811
duke@435 2812 if (status == ParMarkBitMap::full) {
jcoomes@810 2813 decrement_destination_counts(cm, src_region_idx, closure.source());
jcoomes@810 2814 region_ptr->set_deferred_obj_addr(NULL);
jcoomes@810 2815 region_ptr->set_completed();
duke@435 2816 return;
duke@435 2817 }
duke@435 2818
jcoomes@810 2819 decrement_destination_counts(cm, src_region_idx, end_addr);
jcoomes@810 2820
jcoomes@810 2821 // Move to the next source region, possibly switching spaces as well. All
duke@435 2822 // args except end_addr may be modified.
jcoomes@810 2823 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
jcoomes@810 2824 end_addr);
duke@435 2825 } while (true);
duke@435 2826 }
duke@435 2827
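// Compact a single space: update interior oops (and fill dead space) in the
// dense prefix, then move and update every live object from the end of the
// dense prefix up to the space's new_top.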
duke@435 2828 void
duke@435 2829 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
duke@435 2830 const MutableSpace* sp = space(space_id);
duke@435 2831 if (sp->is_empty()) {
duke@435 2832 return;
duke@435 2833 }
duke@435 2834
duke@435 2835 ParallelCompactData& sd = PSParallelCompact::summary_data();
duke@435 2836 ParMarkBitMap* const bitmap = mark_bitmap();
duke@435 2837 HeapWord* const dp_addr = dense_prefix(space_id);
duke@435 2838 HeapWord* beg_addr = sp->bottom();
duke@435 2839 HeapWord* end_addr = sp->top();
duke@435 2840
duke@435 2841 #ifdef ASSERT
duke@435 2842 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
duke@435 2843 if (cm->should_verify_only()) {
duke@435 2844 VerifyUpdateClosure verify_update(cm, sp);
duke@435 2845 bitmap->iterate(&verify_update, beg_addr, end_addr);
duke@435 2846 return;
duke@435 2847 }
duke@435 2848
duke@435 2849 if (cm->should_reset_only()) {
duke@435 2850 ResetObjectsClosure reset_objects(cm);
duke@435 2851 bitmap->iterate(&reset_objects, beg_addr, end_addr);
duke@435 2852 return;
duke@435 2853 }
duke@435 2854 #endif
duke@435 2855
jcoomes@810 2856 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
jcoomes@810 2857 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
jcoomes@810 2858 if (beg_region < dp_region) {
jcoomes@810 2859 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
duke@435 2860 }
duke@435 2861
jcoomes@810 2862 // The destination of the first live object that starts in the region is one
jcoomes@810 2863 // past the end of the partial object entering the region (if any).
jcoomes@810 2864 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
duke@435 2865 HeapWord* const new_top = _space_info[space_id].new_top();
duke@435 2866 assert(new_top >= dest_addr, "bad new_top value");
duke@435 2867 const size_t words = pointer_delta(new_top, dest_addr);
duke@435 2868
duke@435 2869 if (words > 0) {
duke@435 2870 ObjectStartArray* start_array = _space_info[space_id].start_array();
duke@435 2871 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
duke@435 2872
duke@435 2873 ParMarkBitMap::IterationStatus status;
duke@435 2874 status = bitmap->iterate(&closure, dest_addr, end_addr);
duke@435 2875 assert(status == ParMarkBitMap::full, "iteration not complete");
duke@435 2876 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
duke@435 2877 "live objects skipped because closure is full");
duke@435 2878 }
duke@435 2879 }
duke@435 2880
duke@435 2881 jlong PSParallelCompact::millis_since_last_gc() {
duke@435 2882 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
duke@435 2883 // XXX See note in genCollectedHeap::millis_since_last_gc().
duke@435 2884 if (ret_val < 0) {
duke@435 2885 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
duke@435 2886 return 0;
duke@435 2887 }
duke@435 2888 return ret_val;
duke@435 2889 }
duke@435 2890
duke@435 2891 void PSParallelCompact::reset_millis_since_last_gc() {
duke@435 2892 _time_of_last_gc = os::javaTimeMillis();
duke@435 2893 }
duke@435 2894
duke@435 2895 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
duke@435 2896 {
duke@435 2897 if (source() != destination()) {
duke@435 2898 assert(source() > destination(), "must copy to the left");
duke@435 2899 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
duke@435 2900 }
duke@435 2901 update_state(words_remaining());
duke@435 2902 assert(is_full(), "sanity");
duke@435 2903 return ParMarkBitMap::full;
duke@435 2904 }
duke@435 2905
duke@435 2906 void MoveAndUpdateClosure::copy_partial_obj()
duke@435 2907 {
duke@435 2908 size_t words = words_remaining();
duke@435 2909
duke@435 2910 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
duke@435 2911 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
duke@435 2912 if (end_addr < range_end) {
duke@435 2913 words = bitmap()->obj_size(source(), end_addr);
duke@435 2914 }
duke@435 2915
duke@435 2916 // This test is necessary; if omitted, the pointer updates to a partial object
duke@435 2917 // that crosses the dense prefix boundary could be overwritten.
duke@435 2918 if (source() != destination()) {
duke@435 2919 assert(source() > destination(), "must copy to the left");
duke@435 2920 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 2921 }
duke@435 2922 update_state(words);
duke@435 2923 }
duke@435 2924
duke@435 2925 ParMarkBitMapClosure::IterationStatus
duke@435 2926 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2927 assert(destination() != NULL, "sanity");
duke@435 2928 assert(bitmap()->obj_size(addr) == words, "bad size");
duke@435 2929
duke@435 2930 _source = addr;
duke@435 2931 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
duke@435 2932 destination(), "wrong destination");
duke@435 2933
duke@435 2934 if (words > words_remaining()) {
duke@435 2935 return ParMarkBitMap::would_overflow;
duke@435 2936 }
duke@435 2937
duke@435 2938 // The start_array must be updated even if the object is not moving.
duke@435 2939 if (_start_array != NULL) {
duke@435 2940 _start_array->allocate_block(destination());
duke@435 2941 }
duke@435 2942
duke@435 2943 if (destination() != source()) {
duke@435 2944 assert(destination() < source(), "must copy to the left");
duke@435 2945 Copy::aligned_conjoint_words(source(), destination(), words);
duke@435 2946 }
duke@435 2947
duke@435 2948 oop moved_oop = (oop) destination();
duke@435 2949 moved_oop->update_contents(compaction_manager());
duke@435 2950 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
duke@435 2951
duke@435 2952 update_state(words);
duke@435 2953 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
duke@435 2954 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
duke@435 2955 }
duke@435 2956
duke@435 2957 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 2958 ParCompactionManager* cm,
duke@435 2959 PSParallelCompact::SpaceId space_id) :
duke@435 2960 ParMarkBitMapClosure(mbm, cm),
duke@435 2961 _space_id(space_id),
duke@435 2962 _start_array(PSParallelCompact::start_array(space_id))
duke@435 2963 {
duke@435 2964 }
duke@435 2965
duke@435 2966 // Updates the references in the object to their new values.
duke@435 2967 ParMarkBitMapClosure::IterationStatus
duke@435 2968 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2969 do_addr(addr);
duke@435 2970 return ParMarkBitMap::incomplete;
duke@435 2971 }
duke@435 2972
duke@435 2973 // Verify the new location using the forwarding pointer
duke@435 2974 // from MarkSweep::mark_sweep_phase2(). The mark word is reset
duke@435 2975 // to its initial value by ResetObjectsClosure below.
duke@435 2976 ParMarkBitMapClosure::IterationStatus
duke@435 2977 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2978 // The second arg (words) is not used.
duke@435 2979 oop obj = (oop) addr;
duke@435 2980 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
duke@435 2981 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
duke@435 2982 if (forwarding_ptr == NULL) {
duke@435 2983 // The object is dead or not moving.
duke@435 2984 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
duke@435 2985 "Object liveness is wrong.");
duke@435 2986 return ParMarkBitMap::incomplete;
duke@435 2987 }
duke@435 2988 assert(UseParallelOldGCDensePrefix ||
duke@435 2989 (HeapMaximumCompactionInterval > 1) ||
duke@435 2990 (MarkSweepAlwaysCompactCount > 1) ||
duke@435 2991 (forwarding_ptr == new_pointer),
duke@435 2992 "Calculation of new location is incorrect");
duke@435 2993 return ParMarkBitMap::incomplete;
duke@435 2994 }
duke@435 2995
duke@435 2996 // Reset objects modified for debug checking.
duke@435 2997 ParMarkBitMapClosure::IterationStatus
duke@435 2998 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
duke@435 2999 // The second arg (words) is not used.
duke@435 3000 oop obj = (oop) addr;
duke@435 3001 obj->init_mark();
duke@435 3002 return ParMarkBitMap::incomplete;
duke@435 3003 }
duke@435 3004
duke@435 3005 // Prepare for compaction. This method is executed once
duke@435 3006 // (i.e., by a single thread) before compaction.
duke@435 3007 // Save the updated location of the intArrayKlassObj for
duke@435 3008 // filling holes in the dense prefix.
duke@435 3009 void PSParallelCompact::compact_prologue() {
duke@435 3010 _updated_int_array_klass_obj = (klassOop)
duke@435 3011 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
duke@435 3012 }
duke@435 3013
duke@435 3014 // The initial implementation of this method created a field
duke@435 3015 // _next_compaction_space_id in SpaceInfo and initialized
duke@435 3016 // that field in SpaceInfo::initialize_space_info(). That
duke@435 3017 // required that _next_compaction_space_id be declared a
duke@435 3018 // SpaceId in SpaceInfo and that would have required that
duke@435 3019 // either SpaceId be declared in a separate class or that
duke@435 3020 // it be declared in SpaceInfo. It didn't seem consistent
duke@435 3021 // to declare it in SpaceInfo (didn't really fit logically).
duke@435 3022 // Alternatively, defining a separate class to define SpaceId
duke@435 3023 // seemed excessive. This implementation is simple and localizes
duke@435 3024 // the knowledge.
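//
// Illustrative sketch (not part of the original sources): since each space
// maps to a single successor, the compaction order can be walked with a loop
// such as the following, assuming the walk starts at old_space_id:
//
//   for (SpaceId id = old_space_id;
//        id != last_space_id;
//        id = next_compaction_space_id(id)) {
//     // process space(id): old, eden, from, to -- in that order
//   }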
duke@435 3025
duke@435 3026 PSParallelCompact::SpaceId
duke@435 3027 PSParallelCompact::next_compaction_space_id(SpaceId id) {
duke@435 3028 assert(id < last_space_id, "id out of range");
duke@435 3029 switch (id) {
duke@435 3030 case perm_space_id :
duke@435 3031 return last_space_id;
duke@435 3032 case old_space_id :
duke@435 3033 return eden_space_id;
duke@435 3034 case eden_space_id :
duke@435 3035 return from_space_id;
duke@435 3036 case from_space_id :
duke@435 3037 return to_space_id;
duke@435 3038 case to_space_id :
duke@435 3039 return last_space_id;
duke@435 3040 default:
duke@435 3041 assert(false, "Bad space id");
duke@435 3042 return last_space_id;
duke@435 3043 }
duke@435 3044 }
