Wed, 09 Jul 2008 15:08:55 -0700
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
Summary: Maintain a high water mark for the allocations in a space and mangle only up to that high water mark.
Reviewed-by: ysr, apetrusenko
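In outline (an illustrative sketch only; the class and member names below are hypothetical, and the actual change is to the space/generation mangling code rather than to this file):

// Hypothetical sketch of the high-water-mark idea described in the summary.
class MangleHighWaterMark {
  HeapWord* _top_for_allocations;   // highest top() ever seen in the space
 public:
  MangleHighWaterMark() : _top_for_allocations(NULL) { }
  void update(HeapWord* new_top) {
    if (new_top > _top_for_allocations) _top_for_allocations = new_top;
  }
  // Only [top, high-water mark) can contain stale data, so a remangle after a
  // collection can stop at the high-water mark instead of sweeping to end().
  void remangle(HeapWord* top, HeapWord* end) {
    HeapWord* const limit = MIN2(_top_for_allocations, end);
    if (top < limit) {
      Copy::fill_to_words(top, pointer_delta(limit, top), badHeapWord);
    }
  }
};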
1 /*
2 * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_psParallelCompact.cpp.incl"
28 #include <math.h>
30 // All sizes are in HeapWords.
31 const size_t ParallelCompactData::Log2ChunkSize = 9; // 512 words
32 const size_t ParallelCompactData::ChunkSize = (size_t)1 << Log2ChunkSize;
33 const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
34 const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
35 const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
36 const size_t ParallelCompactData::ChunkAddrMask = ~ChunkAddrOffsetMask;
38 // 32-bit: 128 words covers 4 bitmap words
39 // 64-bit: 128 words covers 2 bitmap words
40 const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
41 const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
42 const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
43 const size_t ParallelCompactData::BlockMask = ~BlockOffsetMask;
45 const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
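// Example (added for illustration): these constants map heap addresses to
// chunk/block indices and offsets; roughly,
//
//   size_t chunk_idx = pointer_delta(addr, _region_start) >> Log2ChunkSize;
//   size_t chunk_off = pointer_delta(addr, _region_start) & ChunkSizeOffsetMask;
//   size_t block_idx = pointer_delta(addr, _region_start) >> Log2BlockSize;
//
// The ChunkAddr* masks are the byte-granularity equivalents, for use on raw
// addresses rather than word offsets.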
47 const ParallelCompactData::ChunkData::chunk_sz_t
48 ParallelCompactData::ChunkData::dc_shift = 27;
50 const ParallelCompactData::ChunkData::chunk_sz_t
51 ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;
53 const ParallelCompactData::ChunkData::chunk_sz_t
54 ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;
56 const ParallelCompactData::ChunkData::chunk_sz_t
57 ParallelCompactData::ChunkData::los_mask = ~dc_mask;
59 const ParallelCompactData::ChunkData::chunk_sz_t
60 ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;
62 const ParallelCompactData::ChunkData::chunk_sz_t
63 ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
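// Field layout (added for illustration; assumes chunk_sz_t is a 32-bit
// unsigned type, as the shifted constants above suggest).  With dc_shift == 27
// the destination count occupies the high bits and the live-object size the
// low bits:
//
//   dc_mask      = 0xf8000000   destination count field
//   dc_one       = 0x08000000   one destination chunk
//   dc_claimed   = 0x40000000   chunk has been claimed for filling
//   dc_completed = 0x60000000   chunk has been completely filled
//   los_mask     = 0x07ffffff   live object size, in words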
65 #ifdef ASSERT
66 short ParallelCompactData::BlockData::_cur_phase = 0;
67 #endif
69 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
70 bool PSParallelCompact::_print_phases = false;
72 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
73 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
75 double PSParallelCompact::_dwl_mean;
76 double PSParallelCompact::_dwl_std_dev;
77 double PSParallelCompact::_dwl_first_term;
78 double PSParallelCompact::_dwl_adjustment;
79 #ifdef ASSERT
80 bool PSParallelCompact::_dwl_initialized = false;
81 #endif // #ifdef ASSERT
83 #ifdef VALIDATE_MARK_SWEEP
84 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
85 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
86 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
87 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
88 size_t PSParallelCompact::_live_oops_index = 0;
89 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
90 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
91 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
92 bool PSParallelCompact::_pointer_tracking = false;
93 bool PSParallelCompact::_root_tracking = true;
95 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
96 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
97 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
98 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
99 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
100 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
101 #endif
103 // XXX beg - verification code; only works while we also mark in object headers
104 static void
105 verify_mark_bitmap(ParMarkBitMap& _mark_bitmap)
106 {
107 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
109 PSPermGen* perm_gen = heap->perm_gen();
110 PSOldGen* old_gen = heap->old_gen();
111 PSYoungGen* young_gen = heap->young_gen();
113 MutableSpace* perm_space = perm_gen->object_space();
114 MutableSpace* old_space = old_gen->object_space();
115 MutableSpace* eden_space = young_gen->eden_space();
116 MutableSpace* from_space = young_gen->from_space();
117 MutableSpace* to_space = young_gen->to_space();
119 // 'from_space' here is the survivor space at the lower address.
120 if (to_space->bottom() < from_space->bottom()) {
121 from_space = to_space;
122 to_space = young_gen->from_space();
123 }
125 HeapWord* boundaries[12];
126 unsigned int bidx = 0;
127 const unsigned int bidx_max = sizeof(boundaries) / sizeof(boundaries[0]);
129 boundaries[0] = perm_space->bottom();
130 boundaries[1] = perm_space->top();
131 boundaries[2] = old_space->bottom();
132 boundaries[3] = old_space->top();
133 boundaries[4] = eden_space->bottom();
134 boundaries[5] = eden_space->top();
135 boundaries[6] = from_space->bottom();
136 boundaries[7] = from_space->top();
137 boundaries[8] = to_space->bottom();
138 boundaries[9] = to_space->top();
139 boundaries[10] = to_space->end();
140 boundaries[11] = to_space->end();
142 BitMap::idx_t beg_bit = 0;
143 BitMap::idx_t end_bit;
144 BitMap::idx_t tmp_bit;
145 const BitMap::idx_t last_bit = _mark_bitmap.size();
146 do {
147 HeapWord* addr = _mark_bitmap.bit_to_addr(beg_bit);
148 if (_mark_bitmap.is_marked(beg_bit)) {
149 oop obj = (oop)addr;
150 assert(obj->is_gc_marked(), "obj header is not marked");
151 end_bit = _mark_bitmap.find_obj_end(beg_bit, last_bit);
152 const size_t size = _mark_bitmap.obj_size(beg_bit, end_bit);
153 assert(size == (size_t)obj->size(), "end bit wrong?");
154 beg_bit = _mark_bitmap.find_obj_beg(beg_bit + 1, last_bit);
155 assert(beg_bit > end_bit, "bit set in middle of an obj");
156 } else {
157 if (addr >= boundaries[bidx] && addr < boundaries[bidx + 1]) {
158 // a dead object in the current space.
159 oop obj = (oop)addr;
160 end_bit = _mark_bitmap.addr_to_bit(addr + obj->size());
161 assert(!obj->is_gc_marked(), "obj marked in header, not in bitmap");
162 tmp_bit = beg_bit + 1;
163 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
164 assert(beg_bit == end_bit, "beg bit set in unmarked obj");
165 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
166 assert(beg_bit == end_bit, "end bit set in unmarked obj");
167 } else if (addr < boundaries[bidx + 2]) {
168 // addr is between top in the current space and bottom in the next.
169 end_bit = beg_bit + pointer_delta(boundaries[bidx + 2], addr);
170 tmp_bit = beg_bit;
171 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, end_bit);
172 assert(beg_bit == end_bit, "beg bit set above top");
173 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, end_bit);
174 assert(beg_bit == end_bit, "end bit set above top");
175 bidx += 2;
176 } else if (bidx < bidx_max - 2) {
177 bidx += 2; // ???
178 } else {
179 tmp_bit = beg_bit;
180 beg_bit = _mark_bitmap.find_obj_beg(tmp_bit, last_bit);
181 assert(beg_bit == last_bit, "beg bit set outside heap");
182 beg_bit = _mark_bitmap.find_obj_end(tmp_bit, last_bit);
183 assert(beg_bit == last_bit, "end bit set outside heap");
184 }
185 }
186 } while (beg_bit < last_bit);
187 }
188 // XXX end - verification code; only works while we also mark in object headers
190 #ifndef PRODUCT
191 const char* PSParallelCompact::space_names[] = {
192 "perm", "old ", "eden", "from", "to "
193 };
195 void PSParallelCompact::print_chunk_ranges()
196 {
197 tty->print_cr("space bottom top end new_top");
198 tty->print_cr("------ ---------- ---------- ---------- ----------");
200 for (unsigned int id = 0; id < last_space_id; ++id) {
201 const MutableSpace* space = _space_info[id].space();
202 tty->print_cr("%u %s "
203 SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
204 SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
205 id, space_names[id],
206 summary_data().addr_to_chunk_idx(space->bottom()),
207 summary_data().addr_to_chunk_idx(space->top()),
208 summary_data().addr_to_chunk_idx(space->end()),
209 summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
210 }
211 }
213 void
214 print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
215 {
216 #define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7")
217 #define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")
219 ParallelCompactData& sd = PSParallelCompact::summary_data();
220 size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
221 tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
222 CHUNK_IDX_FORMAT " " PTR_FORMAT " "
223 CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
224 CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
225 i, c->data_location(), dci, c->destination(),
226 c->partial_obj_size(), c->live_obj_size(),
227 c->data_size(), c->source_chunk(), c->destination_count());
229 #undef CHUNK_IDX_FORMAT
230 #undef CHUNK_DATA_FORMAT
231 }
233 void
234 print_generic_summary_data(ParallelCompactData& summary_data,
235 HeapWord* const beg_addr,
236 HeapWord* const end_addr)
237 {
238 size_t total_words = 0;
239 size_t i = summary_data.addr_to_chunk_idx(beg_addr);
240 const size_t last = summary_data.addr_to_chunk_idx(end_addr);
241 HeapWord* pdest = 0;
243 while (i <= last) {
244 ParallelCompactData::ChunkData* c = summary_data.chunk(i);
245 if (c->data_size() != 0 || c->destination() != pdest) {
246 print_generic_summary_chunk(i, c);
247 total_words += c->data_size();
248 pdest = c->destination();
249 }
250 ++i;
251 }
253 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
254 }
256 void
257 print_generic_summary_data(ParallelCompactData& summary_data,
258 SpaceInfo* space_info)
259 {
260 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
261 const MutableSpace* space = space_info[id].space();
262 print_generic_summary_data(summary_data, space->bottom(),
263 MAX2(space->top(), space_info[id].new_top()));
264 }
265 }
267 void
268 print_initial_summary_chunk(size_t i,
269 const ParallelCompactData::ChunkData* c,
270 bool newline = true)
271 {
272 tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
273 SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
274 SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
275 i, c->destination(),
276 c->partial_obj_size(), c->live_obj_size(),
277 c->data_size(), c->source_chunk(), c->destination_count());
278 if (newline) tty->cr();
279 }
281 void
282 print_initial_summary_data(ParallelCompactData& summary_data,
283 const MutableSpace* space) {
284 if (space->top() == space->bottom()) {
285 return;
286 }
288 const size_t chunk_size = ParallelCompactData::ChunkSize;
289 HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
290 const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
291 const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
292 HeapWord* end_addr = c->destination() + c->data_size();
293 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
295 // Print (and count) the full chunks at the beginning of the space.
296 size_t full_chunk_count = 0;
297 size_t i = summary_data.addr_to_chunk_idx(space->bottom());
298 while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
299 print_initial_summary_chunk(i, summary_data.chunk(i));
300 ++full_chunk_count;
301 ++i;
302 }
304 size_t live_to_right = live_in_space - full_chunk_count * chunk_size;
306 double max_reclaimed_ratio = 0.0;
307 size_t max_reclaimed_ratio_chunk = 0;
308 size_t max_dead_to_right = 0;
309 size_t max_live_to_right = 0;
311 // Print the 'reclaimed ratio' for chunks while there is something live in the
312 // chunk or to the right of it. The remaining chunks are empty (and
313 // uninteresting), and computing the ratio will result in division by 0.
314 while (i < end_chunk && live_to_right > 0) {
315 c = summary_data.chunk(i);
316 HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
317 const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
318 const size_t dead_to_right = used_to_right - live_to_right;
319 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
321 if (reclaimed_ratio > max_reclaimed_ratio) {
322 max_reclaimed_ratio = reclaimed_ratio;
323 max_reclaimed_ratio_chunk = i;
324 max_dead_to_right = dead_to_right;
325 max_live_to_right = live_to_right;
326 }
328 print_initial_summary_chunk(i, c, false);
329 tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
330 reclaimed_ratio, dead_to_right, live_to_right);
332 live_to_right -= c->data_size();
333 ++i;
334 }
336 // Any remaining chunks are empty. Print one more if there is one.
337 if (i < end_chunk) {
338 print_initial_summary_chunk(i, summary_data.chunk(i));
339 }
341 tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
342 "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
343 max_reclaimed_ratio_chunk, max_dead_to_right,
344 max_live_to_right, max_reclaimed_ratio);
345 }
347 void
348 print_initial_summary_data(ParallelCompactData& summary_data,
349 SpaceInfo* space_info) {
350 unsigned int id = PSParallelCompact::perm_space_id;
351 const MutableSpace* space;
352 do {
353 space = space_info[id].space();
354 print_initial_summary_data(summary_data, space);
355 } while (++id < PSParallelCompact::eden_space_id);
357 do {
358 space = space_info[id].space();
359 print_generic_summary_data(summary_data, space->bottom(), space->top());
360 } while (++id < PSParallelCompact::last_space_id);
361 }
362 #endif // #ifndef PRODUCT
364 #ifdef ASSERT
365 size_t add_obj_count;
366 size_t add_obj_size;
367 size_t mark_bitmap_count;
368 size_t mark_bitmap_size;
369 #endif // #ifdef ASSERT
371 ParallelCompactData::ParallelCompactData()
372 {
373 _region_start = 0;
375 _chunk_vspace = 0;
376 _chunk_data = 0;
377 _chunk_count = 0;
379 _block_vspace = 0;
380 _block_data = 0;
381 _block_count = 0;
382 }
384 bool ParallelCompactData::initialize(MemRegion covered_region)
385 {
386 _region_start = covered_region.start();
387 const size_t region_size = covered_region.word_size();
388 DEBUG_ONLY(_region_end = _region_start + region_size;)
390 assert(chunk_align_down(_region_start) == _region_start,
391 "region start not aligned");
392 assert((region_size & ChunkSizeOffsetMask) == 0,
393 "region size not a multiple of ChunkSize");
395 bool result = initialize_chunk_data(region_size);
397 // Initialize the block data if it will be used for updating pointers, or if
398 // this is a debug build.
399 if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
400 result = result && initialize_block_data(region_size);
401 }
403 return result;
404 }
406 PSVirtualSpace*
407 ParallelCompactData::create_vspace(size_t count, size_t element_size)
408 {
409 const size_t raw_bytes = count * element_size;
410 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
411 const size_t granularity = os::vm_allocation_granularity();
412 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
414 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
415 MAX2(page_sz, granularity);
416 ReservedSpace rs(bytes, rs_align, rs_align > 0);
417 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
418 rs.size());
419 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
420 if (vspace != 0) {
421 if (vspace->expand_by(bytes)) {
422 return vspace;
423 }
424 delete vspace;
425 }
427 return 0;
428 }
430 bool ParallelCompactData::initialize_chunk_data(size_t region_size)
431 {
432 const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
433 _chunk_vspace = create_vspace(count, sizeof(ChunkData));
434 if (_chunk_vspace != 0) {
435 _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
436 _chunk_count = count;
437 return true;
438 }
439 return false;
440 }
442 bool ParallelCompactData::initialize_block_data(size_t region_size)
443 {
444 const size_t count = (region_size + BlockOffsetMask) >> Log2BlockSize;
445 _block_vspace = create_vspace(count, sizeof(BlockData));
446 if (_block_vspace != 0) {
447 _block_data = (BlockData*)_block_vspace->reserved_low_addr();
448 _block_count = count;
449 return true;
450 }
451 return false;
452 }
454 void ParallelCompactData::clear()
455 {
456 if (_block_data) {
457 memset(_block_data, 0, _block_vspace->committed_size());
458 }
459 memset(_chunk_data, 0, _chunk_vspace->committed_size());
460 }
462 void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
463 assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
464 assert(end_chunk <= _chunk_count, "end_chunk out of range");
465 assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");
467 const size_t chunk_cnt = end_chunk - beg_chunk;
469 if (_block_data) {
470 const size_t blocks_per_chunk = ChunkSize / BlockSize;
471 const size_t beg_block = beg_chunk * blocks_per_chunk;
472 const size_t block_cnt = chunk_cnt * blocks_per_chunk;
473 memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
474 }
475 memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
476 }
478 HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
479 {
480 const ChunkData* cur_cp = chunk(chunk_idx);
481 const ChunkData* const end_cp = chunk(chunk_count() - 1);
483 HeapWord* result = chunk_to_addr(chunk_idx);
484 if (cur_cp < end_cp) {
485 do {
486 result += cur_cp->partial_obj_size();
487 } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
488 }
489 return result;
490 }
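// Example (added for illustration): if an object begins in chunk k, completely
// covers chunks k+1 and k+2, and ends partway into chunk k+3, then
// partial_obj_end(k+1) adds ChunkSize for k+1 and k+2 and the final partial
// size for k+3, returning the address just past the last word of the object.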
492 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
493 {
494 const size_t obj_ofs = pointer_delta(addr, _region_start);
495 const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
496 const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;
498 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
499 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
501 if (beg_chunk == end_chunk) {
502 // All in one chunk.
503 _chunk_data[beg_chunk].add_live_obj(len);
504 return;
505 }
507 // First chunk.
508 const size_t beg_ofs = chunk_offset(addr);
509 _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);
511 klassOop klass = ((oop)addr)->klass();
512 // Middle chunks--completely spanned by this object.
513 for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
514 _chunk_data[chunk].set_partial_obj_size(ChunkSize);
515 _chunk_data[chunk].set_partial_obj_addr(addr);
516 }
518 // Last chunk.
519 const size_t end_ofs = chunk_offset(addr + len - 1);
520 _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
521 _chunk_data[end_chunk].set_partial_obj_addr(addr);
522 }
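// Worked example (added for illustration): with ChunkSize == 512, an object of
// len == 1000 words starting 200 words into chunk k is recorded as
//   chunk k   : add_live_obj(512 - 200)     -> 312 words
//   chunk k+1 : partial_obj_size = 512      (completely spanned)
//   chunk k+2 : partial_obj_size = 175 + 1  (312 + 512 + 176 == 1000)
// with partial_obj_addr on chunks k+1 and k+2 pointing at the object start.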
524 void
525 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
526 {
527 assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
528 assert(chunk_offset(end) == 0, "not ChunkSize aligned");
530 size_t cur_chunk = addr_to_chunk_idx(beg);
531 const size_t end_chunk = addr_to_chunk_idx(end);
532 HeapWord* addr = beg;
533 while (cur_chunk < end_chunk) {
534 _chunk_data[cur_chunk].set_destination(addr);
535 _chunk_data[cur_chunk].set_destination_count(0);
536 _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
537 _chunk_data[cur_chunk].set_data_location(addr);
539 // Update live_obj_size so the chunk appears completely full.
540 size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
541 _chunk_data[cur_chunk].set_live_obj_size(live_size);
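// (Added note: once live_obj_size is set this way, data_size() == ChunkSize
// for every dense prefix chunk, so calc_new_pointer() returns destination +
// offset and objects in the dense prefix are treated as not moving.)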
543 ++cur_chunk;
544 addr += ChunkSize;
545 }
546 }
548 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
549 HeapWord* source_beg, HeapWord* source_end,
550 HeapWord** target_next,
551 HeapWord** source_next) {
552 // This is too strict.
553 // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");
555 if (TraceParallelOldGCSummaryPhase) {
556 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
557 "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
558 "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
559 target_beg, target_end,
560 source_beg, source_end,
561 target_next != 0 ? *target_next : (HeapWord*) 0,
562 source_next != 0 ? *source_next : (HeapWord*) 0);
563 }
565 size_t cur_chunk = addr_to_chunk_idx(source_beg);
566 const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));
568 HeapWord *dest_addr = target_beg;
569 while (cur_chunk < end_chunk) {
570 size_t words = _chunk_data[cur_chunk].data_size();
572 #if 1
573 assert(pointer_delta(target_end, dest_addr) >= words,
574 "source region does not fit into target region");
575 #else
576 // XXX - need some work on the corner cases here. If the chunk does not
577 // fit, then we must either make sure any partial_obj from the chunk fits, or
578 // 'undo' the initial part of the partial_obj that is in the previous chunk.
579 if (dest_addr + words >= target_end) {
580 // Let the caller know where to continue.
581 *target_next = dest_addr;
582 *source_next = chunk_to_addr(cur_chunk);
583 return false;
584 }
585 #endif // #if 1
587 _chunk_data[cur_chunk].set_destination(dest_addr);
589 // Set the destination_count for cur_chunk, and if necessary, update
590 // source_chunk for a destination chunk. The source_chunk field is updated
591 // if cur_chunk is the first (left-most) chunk to be copied to a destination
592 // chunk.
593 //
594 // The destination_count calculation is a bit subtle. A chunk that has data
595 // that compacts into itself does not count itself as a destination. This
596 // maintains the invariant that a zero count means the chunk is available
597 // and can be claimed and then filled.
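// Worked example (added for illustration): if the words from cur_chunk are
// copied to a range spanning two destination chunks, cur_chunk's
// destination_count is 2 (one per destination chunk); if the range lies within
// a single destination chunk it is 1.  In either case a copy into cur_chunk
// itself is not counted, so a chunk whose data compacts entirely into itself
// ends up with a count of 0 and can be claimed immediately.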
598 if (words > 0) {
599 HeapWord* const last_addr = dest_addr + words - 1;
600 const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
601 const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
602 #if 0
603 // Initially assume that the destination chunks will be the same and
604 // adjust the value below if necessary. Under this assumption, if
605 // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely
606 // into itself.
607 uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
608 if (dest_chunk_1 != dest_chunk_2) {
609 // Destination chunks differ; adjust destination_count.
610 destination_count += 1;
611 // Data from cur_chunk will be copied to the start of dest_chunk_2.
612 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
613 } else if (chunk_offset(dest_addr) == 0) {
614 // Data from cur_chunk will be copied to the start of the destination
615 // chunk.
616 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
617 }
618 #else
619 // Initially assume that the destination chunks will be different and
620 // adjust the value below if necessary. Under this assumption, if
621 // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially
622 // into dest_chunk_1 and partially into itself.
623 uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
624 if (dest_chunk_1 != dest_chunk_2) {
625 // Data from cur_chunk will be copied to the start of dest_chunk_2.
626 _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
627 } else {
628 // Destination chunks are the same; adjust destination_count.
629 destination_count -= 1;
630 if (chunk_offset(dest_addr) == 0) {
631 // Data from cur_chunk will be copied to the start of the destination
632 // chunk.
633 _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
634 }
635 }
636 #endif // #if 0
638 _chunk_data[cur_chunk].set_destination_count(destination_count);
639 _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
640 dest_addr += words;
641 }
643 ++cur_chunk;
644 }
646 *target_next = dest_addr;
647 return true;
648 }
650 bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
651 HeapWord* block_addr = block_to_addr(block_index);
652 HeapWord* block_end_addr = block_addr + BlockSize;
653 size_t chunk_index = addr_to_chunk_idx(block_addr);
654 HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);
656 // An object that ends at the end of the block ends
657 // in the block (the last word of the object is to
658 // the left of the end).
659 if ((block_addr < partial_obj_end_addr) &&
660 (partial_obj_end_addr <= block_end_addr)) {
661 return true;
662 }
664 return false;
665 }
667 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
668 HeapWord* result = NULL;
669 if (UseParallelOldGCChunkPointerCalc) {
670 result = chunk_calc_new_pointer(addr);
671 } else {
672 result = block_calc_new_pointer(addr);
673 }
674 return result;
675 }
677 // This method is too complicated (and too expensive) to be called for
678 // every reference.
679 // Try to restructure this so that a NULL is returned if
680 // the object is dead, but don't waste the cycles to explicitly check
681 // that it is dead, since only live objects should be passed in.
683 HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
684 assert(addr != NULL, "Should detect NULL oop earlier");
685 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
686 #ifdef ASSERT
687 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
688 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
689 }
690 #endif
691 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
693 // Chunk covering the object.
694 size_t chunk_index = addr_to_chunk_idx(addr);
695 const ChunkData* const chunk_ptr = chunk(chunk_index);
696 HeapWord* const chunk_addr = chunk_align_down(addr);
698 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
699 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
701 HeapWord* result = chunk_ptr->destination();
703 // If all the data in the chunk is live, then the new location of the object
704 // can be calculated from the destination of the chunk plus the offset of the
705 // object in the chunk.
706 if (chunk_ptr->data_size() == ChunkSize) {
707 result += pointer_delta(addr, chunk_addr);
708 return result;
709 }
711 // The new location of the object is
712 // chunk destination +
713 // size of the partial object extending onto the chunk +
714 // sizes of the live objects in the Chunk that are to the left of addr
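// Worked example (added for illustration): if this chunk's destination is D,
// a partial object from the previous chunk extends 40 words onto this chunk,
// and the live objects in this chunk below addr total 100 words, then addr's
// new location is D + 40 + 100.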
715 const size_t partial_obj_size = chunk_ptr->partial_obj_size();
716 HeapWord* const search_start = chunk_addr + partial_obj_size;
718 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
719 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
721 result += partial_obj_size + live_to_left;
722 assert(result <= addr, "object cannot move to the right");
723 return result;
724 }
726 HeapWord* ParallelCompactData::block_calc_new_pointer(HeapWord* addr) {
727 assert(addr != NULL, "Should detect NULL oop earlier");
728 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
729 #ifdef ASSERT
730 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
731 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
732 }
733 #endif
734 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
736 // Chunk covering the object.
737 size_t chunk_index = addr_to_chunk_idx(addr);
738 const ChunkData* const chunk_ptr = chunk(chunk_index);
739 HeapWord* const chunk_addr = chunk_align_down(addr);
741 assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
742 assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
744 HeapWord* result = chunk_ptr->destination();
746 // If all the data in the chunk is live, then the new location of the object
747 // can be calculated from the destination of the chunk plus the offset of the
748 // object in the chunk.
749 if (chunk_ptr->data_size() == ChunkSize) {
750 result += pointer_delta(addr, chunk_addr);
751 return result;
752 }
754 // The new location of the object is
755 // chunk destination +
756 // block offset +
757 // sizes of the live objects in the Block that are to the left of addr
758 const size_t block_offset = addr_to_block_ptr(addr)->offset();
759 HeapWord* const search_start = chunk_addr + block_offset;
761 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
762 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
764 result += block_offset + live_to_left;
765 assert(result <= addr, "object cannot move to the right");
766 assert(result == chunk_calc_new_pointer(addr), "Should match");
767 return result;
768 }
770 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
771 klassOop updated_klass;
772 if (PSParallelCompact::should_update_klass(old_klass)) {
773 updated_klass = (klassOop) calc_new_pointer(old_klass);
774 } else {
775 updated_klass = old_klass;
776 }
778 return updated_klass;
779 }
781 #ifdef ASSERT
782 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
783 {
784 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
785 const size_t* const end = (const size_t*)vspace->committed_high_addr();
786 for (const size_t* p = beg; p < end; ++p) {
787 assert(*p == 0, "not zero");
788 }
789 }
791 void ParallelCompactData::verify_clear()
792 {
793 verify_clear(_chunk_vspace);
794 verify_clear(_block_vspace);
795 }
796 #endif // #ifdef ASSERT
798 #ifdef NOT_PRODUCT
799 ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
800 ParallelCompactData& sd = PSParallelCompact::summary_data();
801 return sd.chunk(chunk_index);
802 }
803 #endif
805 elapsedTimer PSParallelCompact::_accumulated_time;
806 unsigned int PSParallelCompact::_total_invocations = 0;
807 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
808 jlong PSParallelCompact::_time_of_last_gc = 0;
809 CollectorCounters* PSParallelCompact::_counters = NULL;
810 ParMarkBitMap PSParallelCompact::_mark_bitmap;
811 ParallelCompactData PSParallelCompact::_summary_data;
813 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
815 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
816 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
818 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
819 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
821 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
822 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
824 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
825 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
827 void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
829 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
830 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
832 void PSParallelCompact::post_initialize() {
833 ParallelScavengeHeap* heap = gc_heap();
834 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
836 MemRegion mr = heap->reserved_region();
837 _ref_processor = ReferenceProcessor::create_ref_processor(
838 mr, // span
839 true, // atomic_discovery
840 true, // mt_discovery
841 &_is_alive_closure,
842 ParallelGCThreads,
843 ParallelRefProcEnabled);
844 _counters = new CollectorCounters("PSParallelCompact", 1);
846 // Initialize static fields in ParCompactionManager.
847 ParCompactionManager::initialize(mark_bitmap());
848 }
850 bool PSParallelCompact::initialize() {
851 ParallelScavengeHeap* heap = gc_heap();
852 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
853 MemRegion mr = heap->reserved_region();
855 // Was the old gen allocated successfully?
856 if (!heap->old_gen()->is_allocated()) {
857 return false;
858 }
860 initialize_space_info();
861 initialize_dead_wood_limiter();
863 if (!_mark_bitmap.initialize(mr)) {
864 vm_shutdown_during_initialization("Unable to allocate bit map for "
865 "parallel garbage collection for the requested heap size.");
866 return false;
867 }
869 if (!_summary_data.initialize(mr)) {
870 vm_shutdown_during_initialization("Unable to allocate tables for "
871 "parallel garbage collection for the requested heap size.");
872 return false;
873 }
875 return true;
876 }
878 void PSParallelCompact::initialize_space_info()
879 {
880 memset(&_space_info, 0, sizeof(_space_info));
882 ParallelScavengeHeap* heap = gc_heap();
883 PSYoungGen* young_gen = heap->young_gen();
884 MutableSpace* perm_space = heap->perm_gen()->object_space();
886 _space_info[perm_space_id].set_space(perm_space);
887 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
888 _space_info[eden_space_id].set_space(young_gen->eden_space());
889 _space_info[from_space_id].set_space(young_gen->from_space());
890 _space_info[to_space_id].set_space(young_gen->to_space());
892 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
893 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
895 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
896 if (TraceParallelOldGCDensePrefix) {
897 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
898 _space_info[perm_space_id].min_dense_prefix());
899 }
900 }
902 void PSParallelCompact::initialize_dead_wood_limiter()
903 {
904 const size_t max = 100;
905 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
906 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
907 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
908 DEBUG_ONLY(_dwl_initialized = true;)
909 _dwl_adjustment = normal_distribution(1.0);
910 }
912 // Simple class for storing info about the heap at the start of GC, to be used
913 // after GC for comparison/printing.
914 class PreGCValues {
915 public:
916 PreGCValues() { }
917 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
919 void fill(ParallelScavengeHeap* heap) {
920 _heap_used = heap->used();
921 _young_gen_used = heap->young_gen()->used_in_bytes();
922 _old_gen_used = heap->old_gen()->used_in_bytes();
923 _perm_gen_used = heap->perm_gen()->used_in_bytes();
924 };
926 size_t heap_used() const { return _heap_used; }
927 size_t young_gen_used() const { return _young_gen_used; }
928 size_t old_gen_used() const { return _old_gen_used; }
929 size_t perm_gen_used() const { return _perm_gen_used; }
931 private:
932 size_t _heap_used;
933 size_t _young_gen_used;
934 size_t _old_gen_used;
935 size_t _perm_gen_used;
936 };
938 void
939 PSParallelCompact::clear_data_covering_space(SpaceId id)
940 {
941 // At this point, top is the value before GC, new_top() is the value that will
942 // be set at the end of GC. The marking bitmap is cleared to top; nothing
943 // should be marked above top. The summary data is cleared to the larger of
944 // top & new_top.
945 MutableSpace* const space = _space_info[id].space();
946 HeapWord* const bot = space->bottom();
947 HeapWord* const top = space->top();
948 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
950 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
951 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
952 _mark_bitmap.clear_range(beg_bit, end_bit);
954 const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
955 const size_t end_chunk =
956 _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
957 _summary_data.clear_range(beg_chunk, end_chunk);
958 }
960 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
961 {
962 // Update the from & to space pointers in space_info, since they are swapped
963 // at each young gen gc. Do the update unconditionally (even though a
964 // promotion failure does not swap spaces) because an unknown number of minor
965 // collections will have swapped the spaces an unknown number of times.
966 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
967 ParallelScavengeHeap* heap = gc_heap();
968 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
969 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
971 pre_gc_values->fill(heap);
973 ParCompactionManager::reset();
974 NOT_PRODUCT(_mark_bitmap.reset_counters());
975 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
976 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
978 // Increment the invocation count
979 heap->increment_total_collections(true);
981 // We need to track unique mark sweep invocations as well.
982 _total_invocations++;
984 if (PrintHeapAtGC) {
985 Universe::print_heap_before_gc();
986 }
988 // Fill in TLABs
989 heap->accumulate_statistics_all_tlabs();
990 heap->ensure_parsability(true); // retire TLABs
992 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
993 HandleMark hm; // Discard invalid handles created during verification
994 gclog_or_tty->print(" VerifyBeforeGC:");
995 Universe::verify(true);
996 }
998 // Verify object start arrays
999 if (VerifyObjectStartArray &&
1000 VerifyBeforeGC) {
1001 heap->old_gen()->verify_object_start_array();
1002 heap->perm_gen()->verify_object_start_array();
1003 }
1005 DEBUG_ONLY(mark_bitmap()->verify_clear();)
1006 DEBUG_ONLY(summary_data().verify_clear();)
1008 // Have worker threads release resources the next time they run a task.
1009 gc_task_manager()->release_all_resources();
1010 }
1012 void PSParallelCompact::post_compact()
1013 {
1014 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
1016 // Clear the marking bitmap and summary data and update top() in each space.
1017 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
1018 clear_data_covering_space(SpaceId(id));
1019 _space_info[id].space()->set_top(_space_info[id].new_top());
1020 }
1022 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1023 MutableSpace* const from_space = _space_info[from_space_id].space();
1024 MutableSpace* const to_space = _space_info[to_space_id].space();
1026 ParallelScavengeHeap* heap = gc_heap();
1027 bool eden_empty = eden_space->is_empty();
1028 if (!eden_empty) {
1029 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1030 heap->young_gen(), heap->old_gen());
1031 }
1033 // Update heap occupancy information which is used as input to the soft ref
1034 // clearing policy at the next gc.
1035 Universe::update_heap_info_at_gc();
1037 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1038 to_space->is_empty();
1040 BarrierSet* bs = heap->barrier_set();
1041 if (bs->is_a(BarrierSet::ModRef)) {
1042 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
1043 MemRegion old_mr = heap->old_gen()->reserved();
1044 MemRegion perm_mr = heap->perm_gen()->reserved();
1045 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
1047 if (young_gen_empty) {
1048 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
1049 } else {
1050 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
1051 }
1052 }
1054 Threads::gc_epilogue();
1055 CodeCache::gc_epilogue();
1057 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1059 ref_processor()->enqueue_discovered_references(NULL);
1061 if (ZapUnusedHeapArea) {
1062 heap->gen_mangle_unused_area();
1063 }
1065 // Update time of last GC
1066 reset_millis_since_last_gc();
1067 }
1069 HeapWord*
1070 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1071 bool maximum_compaction)
1072 {
1073 const size_t chunk_size = ParallelCompactData::ChunkSize;
1074 const ParallelCompactData& sd = summary_data();
1076 const MutableSpace* const space = _space_info[id].space();
1077 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
1078 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
1079 const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);
1081 // Skip full chunks at the beginning of the space--they are necessarily part
1082 // of the dense prefix.
1083 size_t full_count = 0;
1084 const ChunkData* cp;
1085 for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
1086 ++full_count;
1087 }
1089 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1090 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1091 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1092 if (maximum_compaction || cp == end_cp || interval_ended) {
1093 _maximum_compaction_gc_num = total_invocations();
1094 return sd.chunk_to_addr(cp);
1095 }
1097 HeapWord* const new_top = _space_info[id].new_top();
1098 const size_t space_live = pointer_delta(new_top, space->bottom());
1099 const size_t space_used = space->used_in_words();
1100 const size_t space_capacity = space->capacity_in_words();
1102 const double cur_density = double(space_live) / space_capacity;
1103 const double deadwood_density =
1104 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1105 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1107 if (TraceParallelOldGCDensePrefix) {
1108 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1109 cur_density, deadwood_density, deadwood_goal);
1110 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1111 "space_cap=" SIZE_FORMAT,
1112 space_live, space_used,
1113 space_capacity);
1114 }
1116 // XXX - Use binary search?
1117 HeapWord* dense_prefix = sd.chunk_to_addr(cp);
1118 const ChunkData* full_cp = cp;
1119 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
1120 while (cp < end_cp) {
1121 HeapWord* chunk_destination = cp->destination();
1122 const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
1123 if (TraceParallelOldGCDensePrefix && Verbose) {
1124 tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
1125 "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
1126 sd.chunk(cp), chunk_destination,
1127 dense_prefix, cur_deadwood);
1128 }
1130 if (cur_deadwood >= deadwood_goal) {
1131 // Found the chunk that has the correct amount of deadwood to the left.
1132 // This typically occurs after crossing a fairly sparse set of chunks, so
1133 // iterate backwards over those sparse chunks, looking for the chunk that
1134 // has the lowest density of live objects 'to the right.'
1135 size_t space_to_left = sd.chunk(cp) * chunk_size;
1136 size_t live_to_left = space_to_left - cur_deadwood;
1137 size_t space_to_right = space_capacity - space_to_left;
1138 size_t live_to_right = space_live - live_to_left;
1139 double density_to_right = double(live_to_right) / space_to_right;
1140 while (cp > full_cp) {
1141 --cp;
1142 const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
1143 const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
1144 double prev_chunk_density_to_right =
1145 double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
1146 if (density_to_right <= prev_chunk_density_to_right) {
1147 return dense_prefix;
1148 }
1149 if (TraceParallelOldGCDensePrefix && Verbose) {
1150 tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
1151 "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
1152 prev_chunk_density_to_right);
1153 }
1154 dense_prefix -= chunk_size;
1155 live_to_right = prev_chunk_live_to_right;
1156 space_to_right = prev_chunk_space_to_right;
1157 density_to_right = prev_chunk_density_to_right;
1158 }
1159 return dense_prefix;
1160 }
1162 dense_prefix += chunk_size;
1163 ++cp;
1164 }
1166 return dense_prefix;
1167 }
1169 #ifndef PRODUCT
1170 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1171 const SpaceId id,
1172 const bool maximum_compaction,
1173 HeapWord* const addr)
1174 {
1175 const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
1176 ChunkData* const cp = summary_data().chunk(chunk_idx);
1177 const MutableSpace* const space = _space_info[id].space();
1178 HeapWord* const new_top = _space_info[id].new_top();
1180 const size_t space_live = pointer_delta(new_top, space->bottom());
1181 const size_t dead_to_left = pointer_delta(addr, cp->destination());
1182 const size_t space_cap = space->capacity_in_words();
1183 const double dead_to_left_pct = double(dead_to_left) / space_cap;
1184 const size_t live_to_right = new_top - cp->destination();
1185 const size_t dead_to_right = space->top() - addr - live_to_right;
1187 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
1188 "spl=" SIZE_FORMAT " "
1189 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1190 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1191 " ratio=%10.8f",
1192 algorithm, addr, chunk_idx,
1193 space_live,
1194 dead_to_left, dead_to_left_pct,
1195 dead_to_right, live_to_right,
1196 double(dead_to_right) / live_to_right);
1197 }
1198 #endif // #ifndef PRODUCT
1200 // Return a fraction indicating how much of the generation can be treated as
1201 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
1202 // based on the density of live objects in the generation to determine a limit,
1203 // which is then adjusted so the return value is min_percent when the density is
1204 // 1.
1205 //
1206 // The following table shows some return values for different values of the
1207 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1208 // min_percent is 1.
1209 //
1210 // fraction allowed as dead wood
1211 // -----------------------------------------------------------------
1212 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1213 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1214 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1215 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1216 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1217 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1218 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1219 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1220 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1221 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1222 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1223 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1224 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1225 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1226 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1227 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1228 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1229 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1230 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1231 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1232 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1233 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1234 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
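// In formula form (added for illustration; normal_distribution() is defined
// elsewhere and is assumed here to be the Gaussian density with the mean and
// standard deviation configured in initialize_dead_wood_limiter()):
//
//   limit(x) = max(0, N(x) - N(1.0) + min_percent / 100)
//   N(x)     = exp(-(x - mean)^2 / (2 * std_dev^2)) / (sqrt(2 * pi) * std_dev)
//
// which peaks at x == mean (0.5 by default) and equals min_percent / 100 at
// x == 1, matching the last row of the table.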
1236 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1237 {
1238 assert(_dwl_initialized, "uninitialized");
1240 // The raw limit is the value of the normal distribution at x = density.
1241 const double raw_limit = normal_distribution(density);
1243 // Adjust the raw limit so it becomes the minimum when the density is 1.
1244 //
1245 // First subtract the adjustment value (which is simply the precomputed value
1246 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1247 // Then add the minimum value, so the minimum is returned when the density is
1248 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
1249 const double min = double(min_percent) / 100.0;
1250 const double limit = raw_limit - _dwl_adjustment + min;
1251 return MAX2(limit, 0.0);
1252 }
1254 ParallelCompactData::ChunkData*
1255 PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
1256 const ChunkData* end)
1257 {
1258 const size_t chunk_size = ParallelCompactData::ChunkSize;
1259 ParallelCompactData& sd = summary_data();
1260 size_t left = sd.chunk(beg);
1261 size_t right = end > beg ? sd.chunk(end) - 1 : left;
1263 // Binary search.
1264 while (left < right) {
1265 // Equivalent to (left + right) / 2, but does not overflow.
1266 const size_t middle = left + (right - left) / 2;
1267 ChunkData* const middle_ptr = sd.chunk(middle);
1268 HeapWord* const dest = middle_ptr->destination();
1269 HeapWord* const addr = sd.chunk_to_addr(middle);
1270 assert(dest != NULL, "sanity");
1271 assert(dest <= addr, "must move left");
1273 if (middle > left && dest < addr) {
1274 right = middle - 1;
1275 } else if (middle < right && middle_ptr->data_size() == chunk_size) {
1276 left = middle + 1;
1277 } else {
1278 return middle_ptr;
1279 }
1280 }
1281 return sd.chunk(left);
1282 }
1284 ParallelCompactData::ChunkData*
1285 PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
1286 const ChunkData* end,
1287 size_t dead_words)
1288 {
1289 ParallelCompactData& sd = summary_data();
1290 size_t left = sd.chunk(beg);
1291 size_t right = end > beg ? sd.chunk(end) - 1 : left;
1293 // Binary search.
1294 while (left < right) {
1295 // Equivalent to (left + right) / 2, but does not overflow.
1296 const size_t middle = left + (right - left) / 2;
1297 ChunkData* const middle_ptr = sd.chunk(middle);
1298 HeapWord* const dest = middle_ptr->destination();
1299 HeapWord* const addr = sd.chunk_to_addr(middle);
1300 assert(dest != NULL, "sanity");
1301 assert(dest <= addr, "must move left");
1303 const size_t dead_to_left = pointer_delta(addr, dest);
1304 if (middle > left && dead_to_left > dead_words) {
1305 right = middle - 1;
1306 } else if (middle < right && dead_to_left < dead_words) {
1307 left = middle + 1;
1308 } else {
1309 return middle_ptr;
1310 }
1311 }
1312 return sd.chunk(left);
1313 }
1315 // The result is valid during the summary phase, after the initial summarization
1316 // of each space into itself, and before final summarization.
1317 inline double
1318 PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
1319 HeapWord* const bottom,
1320 HeapWord* const top,
1321 HeapWord* const new_top)
1322 {
1323 ParallelCompactData& sd = summary_data();
1325 assert(cp != NULL, "sanity");
1326 assert(bottom != NULL, "sanity");
1327 assert(top != NULL, "sanity");
1328 assert(new_top != NULL, "sanity");
1329 assert(top >= new_top, "summary data problem?");
1330 assert(new_top > bottom, "space is empty; should not be here");
1331 assert(new_top >= cp->destination(), "sanity");
1332 assert(top >= sd.chunk_to_addr(cp), "sanity");
1334 HeapWord* const destination = cp->destination();
1335 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1336 const size_t compacted_region_live = pointer_delta(new_top, destination);
1337 const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
1338 const size_t reclaimable = compacted_region_used - compacted_region_live;
1340 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1341 return double(reclaimable) / divisor;
1342 }
1344 // Return the address of the end of the dense prefix, a.k.a. the start of the
1345 // compacted region. The address is always on a chunk boundary.
1346 //
1347 // Completely full chunks at the left are skipped, since no compaction can occur
1348 // in those chunks. Then the maximum amount of dead wood to allow is computed,
1349 // based on the density (amount live / capacity) of the generation; the chunk
1350 // with approximately that amount of dead space to the left is identified as the
1351 // limit chunk. Chunks between the last completely full chunk and the limit
1352 // chunk are scanned and the one that has the best (maximum) reclaimed_ratio()
1353 // is selected.
1354 HeapWord*
1355 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1356 bool maximum_compaction)
1357 {
1358 const size_t chunk_size = ParallelCompactData::ChunkSize;
1359 const ParallelCompactData& sd = summary_data();
1361 const MutableSpace* const space = _space_info[id].space();
1362 HeapWord* const top = space->top();
1363 HeapWord* const top_aligned_up = sd.chunk_align_up(top);
1364 HeapWord* const new_top = _space_info[id].new_top();
1365 HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
1366 HeapWord* const bottom = space->bottom();
1367 const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
1368 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
1369 const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);
1371 // Skip full chunks at the beginning of the space--they are necessarily part
1372 // of the dense prefix.
1373 const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
1374 assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
1375 space->is_empty(), "no dead space allowed to the left");
1376 assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
1377 "chunk must have dead space");
1379 // The gc number is saved whenever a maximum compaction is done, and used to
1380 // determine when the maximum compaction interval has expired. This avoids
1381 // successive max compactions for different reasons.
1382 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1383 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1384 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1385 total_invocations() == HeapFirstMaximumCompactionCount;
1386 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1387 _maximum_compaction_gc_num = total_invocations();
1388 return sd.chunk_to_addr(full_cp);
1389 }
1391 const size_t space_live = pointer_delta(new_top, bottom);
1392 const size_t space_used = space->used_in_words();
1393 const size_t space_capacity = space->capacity_in_words();
1395 const double density = double(space_live) / double(space_capacity);
1396 const size_t min_percent_free =
1397 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
1398 const double limiter = dead_wood_limiter(density, min_percent_free);
1399 const size_t dead_wood_max = space_used - space_live;
1400 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1401 dead_wood_max);
1403 if (TraceParallelOldGCDensePrefix) {
1404 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1405 "space_cap=" SIZE_FORMAT,
1406 space_live, space_used,
1407 space_capacity);
1408 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
1409 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1410 density, min_percent_free, limiter,
1411 dead_wood_max, dead_wood_limit);
1412 }
1414 // Locate the chunk with the desired amount of dead space to the left.
1415 const ChunkData* const limit_cp =
1416 dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);
1418 // Scan from the first chunk with dead space to the limit chunk and find the
1419 // one with the best (largest) reclaimed ratio.
1420 double best_ratio = 0.0;
1421 const ChunkData* best_cp = full_cp;
1422 for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
1423 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1424 if (tmp_ratio > best_ratio) {
1425 best_cp = cp;
1426 best_ratio = tmp_ratio;
1427 }
1428 }
1430 #if 0
1431 // Something to consider: if the chunk with the best ratio is 'close to' the
1432 // first chunk w/free space, choose the first chunk with free space
1433 // ("first-free"). The first-free chunk is usually near the start of the
1434 // heap, which means we are copying most of the heap already, so copy a bit
1435 // more to get complete compaction.
1436 if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
1437 _maximum_compaction_gc_num = total_invocations();
1438 best_cp = full_cp;
1439 }
1440 #endif // #if 0
1442 return sd.chunk_to_addr(best_cp);
1443 }
1445 void PSParallelCompact::summarize_spaces_quick()
1446 {
1447 for (unsigned int i = 0; i < last_space_id; ++i) {
1448 const MutableSpace* space = _space_info[i].space();
1449 bool result = _summary_data.summarize(space->bottom(), space->end(),
1450 space->bottom(), space->top(),
1451 _space_info[i].new_top_addr());
1452 assert(result, "should never fail");
1453 _space_info[i].set_dense_prefix(space->bottom());
1454 }
1455 }
1457 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1458 {
1459 HeapWord* const dense_prefix_end = dense_prefix(id);
1460 const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
1461 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1462 if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
1463 // Only enough dead space is filled so that any remaining dead space to the
1464 // left is larger than the minimum filler object. (The remainder is filled
1465 // during the copy/update phase.)
1466 //
1467 // The size of the dead space to the right of the boundary is not a
1468 // concern, since compaction will be able to use whatever space is
1469 // available.
1470 //
1471 // Here '||' is the boundary, 'x' represents a don't care bit and a box
1472 // surrounds the space to be filled with an object.
1473 //
1474 // In the 32-bit VM, each bit represents two 32-bit words:
1475 // +---+
1476 // a) beg_bits: ... x x x | 0 | || 0 x x ...
1477 // end_bits: ... x x x | 0 | || 0 x x ...
1478 // +---+
1479 //
1480 // In the 64-bit VM, each bit represents one 64-bit word:
1481 // +------------+
1482 // b) beg_bits: ... x x x | 0 || 0 | x x ...
1483 // end_bits: ... x x 1 | 0 || 0 | x x ...
1484 // +------------+
1485 // +-------+
1486 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
1487 // end_bits: ... x 1 | 0 0 | || 0 x x ...
1488 // +-------+
1489 // +-----------+
1490 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
1491 // end_bits: ... 1 | 0 0 0 | || 0 x x ...
1492 // +-----------+
1493 // +-------+
1494 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
1495 // end_bits: ... 0 0 | 0 0 | || 0 x x ...
1496 // +-------+
1498 // Initially assume case a, c or e will apply.
1499 size_t obj_len = (size_t)oopDesc::header_size();
1500 HeapWord* obj_beg = dense_prefix_end - obj_len;
1502 #ifdef _LP64
1503 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1504 // Case b above.
1505 obj_beg = dense_prefix_end - 1;
1506 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1507 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1508 // Case d above.
1509 obj_beg = dense_prefix_end - 3;
1510 obj_len = 3;
1511 }
1512 #endif // #ifdef _LP64
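// obj_beg and obj_len now describe the filler needed for the case determined
// above. Fill that range with a dummy object, mark it in the bitmap and
// record it in the summary data and the object start array so the copy/update
// phase treats it like any other live object.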
1514 MemRegion region(obj_beg, obj_len);
1515 SharedHeap::fill_region_with_object(region);
1516 _mark_bitmap.mark_obj(obj_beg, obj_len);
1517 _summary_data.add_obj(obj_beg, obj_len);
1518 assert(start_array(id) != NULL, "sanity");
1519 start_array(id)->allocate_block(obj_beg);
1520 }
1521 }
1523 void
1524 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1525 {
1526 assert(id < last_space_id, "id out of range");
1528 const MutableSpace* space = _space_info[id].space();
1529 HeapWord** new_top_addr = _space_info[id].new_top_addr();
1531 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1532 _space_info[id].set_dense_prefix(dense_prefix_end);
1534 #ifndef PRODUCT
1535 if (TraceParallelOldGCDensePrefix) {
1536 print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
1537 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1538 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1539 }
1540 #endif // #ifndef PRODUCT
1542 // If dead space crosses the dense prefix boundary, it is (at least partially)
1543 // filled with a dummy object, marked live and added to the summary data.
1544 // This simplifies the copy/update phase and must be done before the final
1545 // locations of objects are determined, to prevent leaving a fragment of dead
1546 // space that is too small to fill with an object.
1547 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1548 fill_dense_prefix_end(id);
1549 }
1551 // Compute the destination of each Chunk, and thus each object.
1552 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1553 _summary_data.summarize(dense_prefix_end, space->end(),
1554 dense_prefix_end, space->top(),
1555 new_top_addr);
1557 if (TraceParallelOldGCSummaryPhase) {
1558 const size_t chunk_size = ParallelCompactData::ChunkSize;
1559 const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
1560 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1561 const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
1562 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1563 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1564 "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1565 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1566 id, space->capacity_in_words(), dense_prefix_end,
1567 dp_chunk, dp_words / chunk_size,
1568 cr_words / chunk_size, *new_top_addr);
1569 }
1570 }
1572 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1573 bool maximum_compaction)
1574 {
1575 EventMark m("2 summarize");
1576 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1577 // trace("2");
1579 #ifdef ASSERT
1580 if (VerifyParallelOldWithMarkSweep &&
1581 (PSParallelCompact::total_invocations() %
1582 VerifyParallelOldWithMarkSweepInterval) == 0) {
1583 verify_mark_bitmap(_mark_bitmap);
1584 }
1585 if (TraceParallelOldGCMarkingPhase) {
1586 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1587 "add_obj_bytes=" SIZE_FORMAT,
1588 add_obj_count, add_obj_size * HeapWordSize);
1589 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1590 "mark_bitmap_bytes=" SIZE_FORMAT,
1591 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1592 }
1593 #endif // #ifdef ASSERT
1595 // Quick summarization of each space into itself, to see how much is live.
1596 summarize_spaces_quick();
1598 if (TraceParallelOldGCSummaryPhase) {
1599 tty->print_cr("summary_phase: after summarizing each space to self");
1600 Universe::print();
1601 NOT_PRODUCT(print_chunk_ranges());
1602 if (Verbose) {
1603 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1604 }
1605 }
1607 // The amount of live data that will end up in old space (assuming it fits).
1608 size_t old_space_total_live = 0;
1609 unsigned int id;
1610 for (id = old_space_id; id < last_space_id; ++id) {
1611 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1612 _space_info[id].space()->bottom());
1613 }
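// If the live data that would end up in the old space exceeds its capacity,
// fall back to maximum compaction; the same is done when
// UseParallelOldGCDensePrefix is disabled.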
1615 const MutableSpace* old_space = _space_info[old_space_id].space();
1616 if (old_space_total_live > old_space->capacity_in_words()) {
1617 // XXX - should also try to expand
1618 maximum_compaction = true;
1619 } else if (!UseParallelOldGCDensePrefix) {
1620 maximum_compaction = true;
1621 }
1623 // Permanent and Old generations.
1624 summarize_space(perm_space_id, maximum_compaction);
1625 summarize_space(old_space_id, maximum_compaction);
1627 // Summarize the remaining spaces (those in the young gen) into old space. If
1628 // the live data from a space doesn't fit, the existing summarization is left
1629 // intact, so the data is compacted down within the space itself.
1630 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
1631 HeapWord* const target_space_end = old_space->end();
1632 for (id = eden_space_id; id < last_space_id; ++id) {
1633 const MutableSpace* space = _space_info[id].space();
1634 const size_t live = pointer_delta(_space_info[id].new_top(),
1635 space->bottom());
1636 const size_t available = pointer_delta(target_space_end, *new_top_addr);
1637 if (live <= available) {
1638 // All the live data will fit.
1639 if (TraceParallelOldGCSummaryPhase) {
1640 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
1641 id, *new_top_addr);
1642 }
1643 _summary_data.summarize(*new_top_addr, target_space_end,
1644 space->bottom(), space->top(),
1645 new_top_addr);
1647 // Reset the new_top value for the space.
1648 _space_info[id].set_new_top(space->bottom());
1650 // Clear the source_chunk field for each chunk in the space.
1651 ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
1652 ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
1653 while (beg_chunk <= end_chunk) {
1654 beg_chunk->set_source_chunk(0);
1655 ++beg_chunk;
1656 }
1657 }
1658 }
1660 // Fill in the block data after any changes to the chunks have
1661 // been made.
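// In debug builds the block data is always computed, presumably so it can be
// cross-checked against the chunk data; in product builds it is needed only
// when UseParallelOldGCChunkPointerCalc is disabled.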
1662 #ifdef ASSERT
1663 summarize_blocks(cm, perm_space_id);
1664 summarize_blocks(cm, old_space_id);
1665 #else
1666 if (!UseParallelOldGCChunkPointerCalc) {
1667 summarize_blocks(cm, perm_space_id);
1668 summarize_blocks(cm, old_space_id);
1669 }
1670 #endif
1672 if (TraceParallelOldGCSummaryPhase) {
1673 tty->print_cr("summary_phase: after final summarization");
1674 Universe::print();
1675 NOT_PRODUCT(print_chunk_ranges());
1676 if (Verbose) {
1677 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1678 }
1679 }
1680 }
1682 // Fill in the BlockData.
1683 // Iterate over the spaces and within each space iterate over
1684 // the chunks and fill in the BlockData for each chunk.
1686 void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
1687 SpaceId first_compaction_space_id) {
1688 #if 0
1689 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(1);)
1690 for (SpaceId cur_space_id = first_compaction_space_id;
1691 cur_space_id != last_space_id;
1692 cur_space_id = next_compaction_space_id(cur_space_id)) {
1693 // Iterate over the chunks in the space
1694 size_t start_chunk_index =
1695 _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
1696 BitBlockUpdateClosure bbu(mark_bitmap(),
1697 cm,
1698 start_chunk_index);
1699 // Iterate over blocks.
1700 for (size_t chunk_index = start_chunk_index;
1701 chunk_index < _summary_data.chunk_count() &&
1702 _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
1703 chunk_index++) {
1705 // Reset the closure for the new chunk. Note that the closure
1706 // maintains some data that does not get reset for each chunk
1707 // so a new instance of the closure is not appropriate.
1708 bbu.reset_chunk(chunk_index);
1710 // Start the iteration with the first live object. This
1711 // may return the end of the chunk. That is acceptable since
1712 // it will properly limit the iterations.
1713 ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
1714 _summary_data.first_live_or_end_in_chunk(chunk_index));
1716 // End the iteration at the end of the chunk.
1717 HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
1718 HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
1719 ParMarkBitMap::idx_t right_offset =
1720 mark_bitmap()->addr_to_bit(chunk_end);
1722 // Blocks that have no objects starting in them can be
1723 // skipped because their data will never be used.
1724 if (left_offset < right_offset) {
1726 // Iterate through the objects in the chunk.
1727 ParMarkBitMap::idx_t last_offset =
1728 mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
1730 // If last_offset is less than right_offset, then the iterations
1731 // terminated while it was looking for an end bit. "last_offset"
1732 // is then the offset for the last start bit. In this situation
1733 // the "offset" field for the next block to the right (_cur_block + 1)
1734 // will not have been updated although there may be live data
1735 // to the left of the chunk.
1737 size_t cur_block_plus_1 = bbu.cur_block() + 1;
1738 HeapWord* cur_block_plus_1_addr =
1739 _summary_data.block_to_addr(bbu.cur_block()) +
1740 ParallelCompactData::BlockSize;
1741 HeapWord* last_offset_addr = mark_bitmap()->bit_to_addr(last_offset);
1742 #if 1 // This code works; the #else alternative should work too, but does not. Why?
1743 // The current block (cur_block()) has already been updated.
1744 // The last block that may need to be updated is either the
1745 // next block (current block + 1) or the block where the
1746 // last object starts (which can be greater than the
1747 // next block if there were no objects found in intervening
1748 // blocks).
1749 size_t last_block =
1750 MAX2(bbu.cur_block() + 1,
1751 _summary_data.addr_to_block_idx(last_offset_addr));
1752 #else
1753 // The current block has already been updated. The only block
1754 // that remains to be updated is the block where the last
1755 // object in the chunk starts.
1756 size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
1757 #endif
1758 assert_bit_is_start(last_offset);
1759 assert((last_block == _summary_data.block_count()) ||
1760 (_summary_data.block(last_block)->raw_offset() == 0),
1761 "Should not have been set");
1762 // Is the last block still in the current chunk? If still
1763 // in this chunk, update the last block (the counting that
1764 // included the current block is meant for the offset of the last
1765 // block). If not in this chunk, do nothing. Should not
1766 // update a block in the next chunk.
1767 if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
1768 last_block)) {
1769 if (last_offset < right_offset) {
1770 // The last object started in this chunk but ends beyond
1771 // this chunk. Update the block for this last object.
1772 assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
1773 // No end bit was found. The closure takes care of
1774 // the cases where
1775 // an object crosses over into the next block
1776 // an object starts and ends in the next block
1777 // It does not handle the case where an object is
1778 // the first object in a later block and extends
1779 // past the end of the chunk (i.e., the closure
1780 // only handles complete objects that are in the range
1781 // it is given). That object is handed back here
1782 // for any special consideration necessary.
1783 //
1784 // Is the first bit in the last block a start or end bit?
1785 //
1786 // If the partial object ends in the last block L,
1787 // then the 1st bit in L may be an end bit.
1788 //
1789 // Else does the last object start in a block after the current
1790 // block? A block AA will already have been updated if an
1791 // object ends in the next block AA+1. An object found to end in
1792 // AA+1 is the trigger that updates AA. Objects are being
1793 // counted in the current block for updating a following
1794 // block. An object may start in a later
1795 // block but may extend beyond the last block in the chunk.
1796 // Updates are only done when the end of an object has been
1797 // found. If the last object (covered by block L) starts
1798 // beyond the current block, then no object ends in L (otherwise
1799 // L would be the current block). So the first bit in L is
1800 // a start bit.
1801 //
1802 // Else the last object starts in the current block and ends
1803 // beyond the chunk. The current block has already been
1804 // updated and there is no later block (with an object
1805 // starting in it) that needs to be updated.
1806 //
1807 if (_summary_data.partial_obj_ends_in_block(last_block)) {
1808 _summary_data.block(last_block)->set_end_bit_offset(
1809 bbu.live_data_left());
1810 } else if (last_offset_addr >= cur_block_plus_1_addr) {
1811 // The start of the object is on a later block
1812 // (to the right of the current block) and there are no
1813 // complete live objects to the left of this last object
1814 // within the chunk.
1815 // The first bit in the block is for the start of the
1816 // last object.
1817 _summary_data.block(last_block)->set_start_bit_offset(
1818 bbu.live_data_left());
1819 } else {
1820 // The start of the last object was found in
1821 // the current chunk (which has already
1822 // been updated).
1823 assert(bbu.cur_block() ==
1824 _summary_data.addr_to_block_idx(last_offset_addr),
1825 "Should be a block already processed");
1826 }
1827 #ifdef ASSERT
1828 // Is there enough block information to find this object?
1829 // The destination of the chunk has not been set so the
1830 // values returned by calc_new_pointer() and
1831 // block_calc_new_pointer() will only be
1832 // offsets. But they should agree.
1833 HeapWord* moved_obj_with_chunks =
1834 _summary_data.chunk_calc_new_pointer(last_offset_addr);
1835 HeapWord* moved_obj_with_blocks =
1836 _summary_data.calc_new_pointer(last_offset_addr);
1837 assert(moved_obj_with_chunks == moved_obj_with_blocks,
1838 "Block calculation is wrong");
1839 #endif
1840 } else if (last_block < _summary_data.block_count()) {
1841 // Iterations ended looking for a start bit (but
1842 // did not run off the end of the block table).
1843 _summary_data.block(last_block)->set_start_bit_offset(
1844 bbu.live_data_left());
1845 }
1846 }
1847 #ifdef ASSERT
1848 // Is there enough block information to find this object?
1849 HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
1850 HeapWord* moved_obj_with_chunks =
1851 _summary_data.chunk_calc_new_pointer(left_offset_addr);
1852 HeapWord* moved_obj_with_blocks =
1853 _summary_data.calc_new_pointer(left_offset_addr);
1854 assert(moved_obj_with_chunks == moved_obj_with_blocks,
1855 "Block calculation is wrong");
1856 #endif
1858 // Is there another block after the end of this chunk?
1859 #ifdef ASSERT
1860 if (last_block < _summary_data.block_count()) {
1861 // No object may have been found in a block. If that
1862 // block is at the end of the chunk, the iteration will
1863 // terminate without incrementing the current block so
1864 // that the current block is not the last block in the
1865 // chunk. That situation precludes asserting that the
1866 // current block is the last block in the chunk. Assert
1867 // the lesser condition that the current block does not
1868 // exceed the chunk.
1869 assert(_summary_data.block_to_addr(last_block) <=
1870 (_summary_data.chunk_to_addr(chunk_index) +
1871 ParallelCompactData::ChunkSize),
1872 "Chunk and block inconsistency");
1873 assert(last_offset <= right_offset, "Iteration overran end");
1874 }
1875 #endif
1876 }
1877 #ifdef ASSERT
1878 if (PrintGCDetails && Verbose) {
1879 if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
1880 size_t first_block =
1881 chunk_index / ParallelCompactData::BlocksPerChunk;
1882 gclog_or_tty->print_cr("first_block " PTR_FORMAT
1883 " _offset " PTR_FORMAT
1884 "_first_is_start_bit %d",
1885 first_block,
1886 _summary_data.block(first_block)->raw_offset(),
1887 _summary_data.block(first_block)->first_is_start_bit());
1888 }
1889 }
1890 #endif
1891 }
1892 }
1893 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(16);)
1894 #endif // #if 0
1895 }
1897 // This method should contain all heap-specific policy for invoking a full
1898 // collection. invoke_no_policy() will only attempt to compact the heap; it
1899 // will do nothing further. If we need to bail out for policy reasons, scavenge
1900 // before full gc, or any other specialized behavior, it needs to be added here.
1901 //
1902 // Note that this method should only be called from the vm_thread while at a
1903 // safepoint.
1904 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1905 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1906 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1907 "should be in vm thread");
1908 ParallelScavengeHeap* heap = gc_heap();
1909 GCCause::Cause gc_cause = heap->gc_cause();
1910 assert(!heap->is_gc_active(), "not reentrant");
1912 PSAdaptiveSizePolicy* policy = heap->size_policy();
1914 // Before each allocation/collection attempt, find out from the
1915 // policy object if GCs are, on the whole, taking too long. If so,
1916 // bail out without attempting a collection. The exceptions are
1917 // for explicitly requested GCs.
1918 if (!policy->gc_time_limit_exceeded() ||
1919 GCCause::is_user_requested_gc(gc_cause) ||
1920 GCCause::is_serviceability_requested_gc(gc_cause)) {
1921 IsGCActiveMark mark;
1923 if (ScavengeBeforeFullGC) {
1924 PSScavenge::invoke_no_policy();
1925 }
1927 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
1928 }
1929 }
1931 bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
1932 size_t addr_chunk_index = addr_to_chunk_idx(addr);
1933 return chunk_index == addr_chunk_index;
1934 }
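// A chunk covers BlocksPerChunk consecutive blocks; a block belongs to
// chunk_index iff it falls in the range
// [chunk_index * BlocksPerChunk, (chunk_index + 1) * BlocksPerChunk).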
1936 bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
1937 size_t block_index) {
1938 size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
1939 size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
1941 return (first_block_in_chunk <= block_index) &&
1942 (block_index <= last_block_in_chunk);
1943 }
1945 // This method contains no policy. You should probably
1946 // be calling invoke() instead.
1947 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1948 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1949 assert(ref_processor() != NULL, "Sanity");
1951 if (GC_locker::check_active_before_gc()) {
1952 return;
1953 }
1955 TimeStamp marking_start;
1956 TimeStamp compaction_start;
1957 TimeStamp collection_exit;
1959 ParallelScavengeHeap* heap = gc_heap();
1960 GCCause::Cause gc_cause = heap->gc_cause();
1961 PSYoungGen* young_gen = heap->young_gen();
1962 PSOldGen* old_gen = heap->old_gen();
1963 PSPermGen* perm_gen = heap->perm_gen();
1964 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1966 if (ZapUnusedHeapArea) {
1967 // Save information needed to minimize mangling
1968 heap->record_gen_tops_before_GC();
1969 }
1971 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
1973 // Make sure data structures are sane, make the heap parsable, and do other
1974 // miscellaneous bookkeeping.
1975 PreGCValues pre_gc_values;
1976 pre_compact(&pre_gc_values);
1978 // Get the compaction manager reserved for the VM thread.
1979 ParCompactionManager* const vmthread_cm =
1980 ParCompactionManager::manager_array(gc_task_manager()->workers());
1982 // Place after pre_compact() where the number of invocations is incremented.
1983 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
1985 {
1986 ResourceMark rm;
1987 HandleMark hm;
1989 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
1991 // This is useful for debugging, but don't change the output the
1992 // customer sees.
1993 const char* gc_cause_str = "Full GC";
1994 if (is_system_gc && PrintGCDetails) {
1995 gc_cause_str = "Full GC (System)";
1996 }
1997 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
1998 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
1999 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
2000 TraceCollectorStats tcs(counters());
2001 TraceMemoryManagerStats tms(true /* Full GC */);
2003 if (TraceGen1Time) accumulated_time()->start();
2005 // Let the size policy know we're starting
2006 size_policy->major_collection_begin();
2008 // When collecting the permanent generation, methodOops may be moving,
2009 // so we either have to flush all bcp data or convert it into bci.
2010 CodeCache::gc_prologue();
2011 Threads::gc_prologue();
2013 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2014 COMPILER2_PRESENT(DerivedPointerTable::clear());
2016 ref_processor()->enable_discovery();
2018 bool marked_for_unloading = false;
2020 marking_start.update();
2021 marking_phase(vmthread_cm, maximum_heap_compaction);
2023 #ifndef PRODUCT
2024 if (TraceParallelOldGCMarkingPhase) {
2025 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
2026 "cas_by_another %d",
2027 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
2028 mark_bitmap()->cas_by_another());
2029 }
2030 #endif // #ifndef PRODUCT
2032 #ifdef ASSERT
2033 if (VerifyParallelOldWithMarkSweep &&
2034 (PSParallelCompact::total_invocations() %
2035 VerifyParallelOldWithMarkSweepInterval) == 0) {
2036 gclog_or_tty->print_cr("Verify marking with mark_sweep_phase1()");
2037 if (PrintGCDetails && Verbose) {
2038 gclog_or_tty->print_cr("mark_sweep_phase1:");
2039 }
2040 // Clear the discovered lists so that discovered objects
2041 // don't look like they have been discovered twice.
2042 ref_processor()->clear_discovered_references();
2044 PSMarkSweep::allocate_stacks();
2045 MemRegion mr = Universe::heap()->reserved_region();
2046 PSMarkSweep::ref_processor()->enable_discovery();
2047 PSMarkSweep::mark_sweep_phase1(maximum_heap_compaction);
2048 }
2049 #endif
2051 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
2052 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
2054 #ifdef ASSERT
2055 if (VerifyParallelOldWithMarkSweep &&
2056 (PSParallelCompact::total_invocations() %
2057 VerifyParallelOldWithMarkSweepInterval) == 0) {
2058 if (PrintGCDetails && Verbose) {
2059 gclog_or_tty->print_cr("mark_sweep_phase2:");
2060 }
2061 PSMarkSweep::mark_sweep_phase2();
2062 }
2063 #endif
2065 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
2066 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
2068 // adjust_roots() updates Universe::_intArrayKlassObj which is
2069 // needed by the compaction for filling holes in the dense prefix.
2070 adjust_roots();
2072 #ifdef ASSERT
2073 if (VerifyParallelOldWithMarkSweep &&
2074 (PSParallelCompact::total_invocations() %
2075 VerifyParallelOldWithMarkSweepInterval) == 0) {
2076 // Do a separate verify phase so that the verify
2077 // code can use the forwarding pointers to
2078 // check the new pointer calculation. The restore_marks()
2079 // has to be done before the real compact.
2080 vmthread_cm->set_action(ParCompactionManager::VerifyUpdate);
2081 compact_perm(vmthread_cm);
2082 compact_serial(vmthread_cm);
2083 vmthread_cm->set_action(ParCompactionManager::ResetObjects);
2084 compact_perm(vmthread_cm);
2085 compact_serial(vmthread_cm);
2086 vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy);
2088 // For debugging only
2089 PSMarkSweep::restore_marks();
2090 PSMarkSweep::deallocate_stacks();
2091 }
2092 #endif
2094 compaction_start.update();
2095 // Does the perm gen always have to be done serially because
2096 // klasses are used in the update of an object?
2097 compact_perm(vmthread_cm);
2099 if (UseParallelOldGCCompacting) {
2100 compact();
2101 } else {
2102 compact_serial(vmthread_cm);
2103 }
2105 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
2106 // done before resizing.
2107 post_compact();
2109 // Let the size policy know we're done
2110 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
2112 if (UseAdaptiveSizePolicy) {
2113 if (PrintAdaptiveSizePolicy) {
2114 gclog_or_tty->print("AdaptiveSizeStart: ");
2115 gclog_or_tty->stamp();
2116 gclog_or_tty->print_cr(" collection: %d ",
2117 heap->total_collections());
2118 if (Verbose) {
2119 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
2120 " perm_gen_capacity: %d ",
2121 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
2122 perm_gen->capacity_in_bytes());
2123 }
2124 }
2126 // Don't check if the size_policy is ready here. Let
2127 // the size_policy check that internally.
2128 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
2129 ((gc_cause != GCCause::_java_lang_system_gc) ||
2130 UseAdaptiveSizePolicyWithSystemGC)) {
2131 // Calculate optimal free space amounts
2132 assert(young_gen->max_size() >
2133 young_gen->from_space()->capacity_in_bytes() +
2134 young_gen->to_space()->capacity_in_bytes(),
2135 "Sizes of space in young gen are out-of-bounds");
2136 size_t max_eden_size = young_gen->max_size() -
2137 young_gen->from_space()->capacity_in_bytes() -
2138 young_gen->to_space()->capacity_in_bytes();
2139 size_policy->compute_generation_free_space(
2140 young_gen->used_in_bytes(),
2141 young_gen->eden_space()->used_in_bytes(),
2142 old_gen->used_in_bytes(),
2143 perm_gen->used_in_bytes(),
2144 young_gen->eden_space()->capacity_in_bytes(),
2145 old_gen->max_gen_size(),
2146 max_eden_size,
2147 true /* full gc*/,
2148 gc_cause);
2150 heap->resize_old_gen(
2151 size_policy->calculated_old_free_size_in_bytes());
2153 // Don't resize the young generation at a major collection. A
2154 // desired young generation size may have been calculated but
2155 // resizing the young generation complicates the code because the
2156 // resizing of the old generation may have moved the boundary
2157 // between the young generation and the old generation. Let the
2158 // young generation resizing happen at the minor collections.
2159 }
2160 if (PrintAdaptiveSizePolicy) {
2161 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
2162 heap->total_collections());
2163 }
2164 }
2166 if (UsePerfData) {
2167 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
2168 counters->update_counters();
2169 counters->update_old_capacity(old_gen->capacity_in_bytes());
2170 counters->update_young_capacity(young_gen->capacity_in_bytes());
2171 }
2173 heap->resize_all_tlabs();
2175 // We collected the perm gen, so we'll resize it here.
2176 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
2178 if (TraceGen1Time) accumulated_time()->stop();
2180 if (PrintGC) {
2181 if (PrintGCDetails) {
2182 // No GC timestamp here. This is after GC so it would be confusing.
2183 young_gen->print_used_change(pre_gc_values.young_gen_used());
2184 old_gen->print_used_change(pre_gc_values.old_gen_used());
2185 heap->print_heap_change(pre_gc_values.heap_used());
2186 // Print perm gen last (print_heap_change() excludes the perm gen).
2187 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
2188 } else {
2189 heap->print_heap_change(pre_gc_values.heap_used());
2190 }
2191 }
2193 // Track memory usage and detect low memory
2194 MemoryService::track_memory_usage();
2195 heap->update_counters();
2197 if (PrintGCDetails) {
2198 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
2199 if (size_policy->gc_time_limit_exceeded()) {
2200 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
2201 "of %d%%", GCTimeLimit);
2202 } else {
2203 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
2204 "of %d%%", GCTimeLimit);
2205 }
2206 }
2207 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
2208 }
2209 }
2211 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
2212 HandleMark hm; // Discard invalid handles created during verification
2213 gclog_or_tty->print(" VerifyAfterGC:");
2214 Universe::verify(false);
2215 }
2217 // Re-verify object start arrays
2218 if (VerifyObjectStartArray &&
2219 VerifyAfterGC) {
2220 old_gen->verify_object_start_array();
2221 perm_gen->verify_object_start_array();
2222 }
2224 if (ZapUnusedHeapArea) {
2225 old_gen->object_space()->check_mangled_unused_area_complete();
2226 perm_gen->object_space()->check_mangled_unused_area_complete();
2227 }
2229 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
2231 collection_exit.update();
2233 if (PrintHeapAtGC) {
2234 Universe::print_heap_after_gc();
2235 }
2236 if (PrintGCTaskTimeStamps) {
2237 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
2238 INT64_FORMAT,
2239 marking_start.ticks(), compaction_start.ticks(),
2240 collection_exit.ticks());
2241 gc_task_manager()->print_task_time_stamps();
2242 }
2243 }
2245 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2246 PSYoungGen* young_gen,
2247 PSOldGen* old_gen) {
2248 MutableSpace* const eden_space = young_gen->eden_space();
2249 assert(!eden_space->is_empty(), "eden must be non-empty");
2250 assert(young_gen->virtual_space()->alignment() ==
2251 old_gen->virtual_space()->alignment(), "alignments do not match");
2253 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2254 return false;
2255 }
2257 // Both generations must be completely committed.
2258 if (young_gen->virtual_space()->uncommitted_size() != 0) {
2259 return false;
2260 }
2261 if (old_gen->virtual_space()->uncommitted_size() != 0) {
2262 return false;
2263 }
2265 // Figure out how much to take from eden. Include the average amount promoted
2266 // in the total; otherwise the next young gen GC will simply bail out to a
2267 // full GC.
2268 const size_t alignment = old_gen->virtual_space()->alignment();
2269 const size_t eden_used = eden_space->used_in_bytes();
2270 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2271 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2272 const size_t eden_capacity = eden_space->capacity_in_bytes();
2274 if (absorb_size >= eden_capacity) {
2275 return false; // Must leave some space in eden.
2276 }
2278 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2279 if (new_young_size < young_gen->min_gen_size()) {
2280 return false; // Respect young gen minimum size.
2281 }
2283 if (TraceAdaptiveGCBoundary && Verbose) {
2284 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
2285 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2286 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2287 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2288 absorb_size / K,
2289 eden_capacity / K, (eden_capacity - absorb_size) / K,
2290 young_gen->from_space()->used_in_bytes() / K,
2291 young_gen->to_space()->used_in_bytes() / K,
2292 young_gen->capacity_in_bytes() / K, new_young_size / K);
2293 }
2295 // Fill the unused part of the old gen.
2296 MutableSpace* const old_space = old_gen->object_space();
2297 MemRegion old_gen_unused(old_space->top(), old_space->end());
2298 if (!old_gen_unused.is_empty()) {
2299 SharedHeap::fill_region_with_object(old_gen_unused);
2300 }
2302 // Take the live data from eden and set both top and end in the old gen to
2303 // eden top. (Need to set end because reset_after_change() mangles the region
2304 // from end to virtual_space->high() in debug builds).
2305 HeapWord* const new_top = eden_space->top();
2306 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2307 absorb_size);
2308 young_gen->reset_after_change();
2309 old_space->set_top(new_top);
2310 old_space->set_end(new_top);
2311 old_gen->reset_after_change();
2313 // Update the object start array for the filler object and the data from eden.
2314 ObjectStartArray* const start_array = old_gen->start_array();
2315 HeapWord* const start = old_gen_unused.start();
2316 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
2317 start_array->allocate_block(addr);
2318 }
2320 // Could update the promoted average here, but it is not typically updated at
2321 // full GCs and the value to use is unclear. Something like
2322 //
2323 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2325 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2326 return true;
2327 }
2329 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2330 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2331 "shouldn't return NULL");
2332 return ParallelScavengeHeap::gc_task_manager();
2333 }
2335 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2336 bool maximum_heap_compaction) {
2337 // Recursively traverse all live objects and mark them
2338 EventMark m("1 mark object");
2339 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2341 ParallelScavengeHeap* heap = gc_heap();
2342 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2343 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
2344 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2346 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2347 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2349 {
2350 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
2352 GCTaskQueue* q = GCTaskQueue::create();
2354 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2355 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2356 // We scan the thread roots in parallel
2357 Threads::create_thread_roots_marking_tasks(q);
2358 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2359 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2360 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2361 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2362 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2363 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
2365 if (parallel_gc_threads > 1) {
2366 for (uint j = 0; j < parallel_gc_threads; j++) {
2367 q->enqueue(new StealMarkingTask(&terminator));
2368 }
2369 }
2371 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2372 q->enqueue(fin);
2374 gc_task_manager()->add_list(q);
2376 fin->wait_for();
2378 // We have to release the barrier tasks!
2379 WaitForBarrierGCTask::destroy(fin);
2380 }
2382 // Process reference objects found during marking
2383 {
2384 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
2385 ReferencePolicy *soft_ref_policy;
2386 if (maximum_heap_compaction) {
2387 soft_ref_policy = new AlwaysClearPolicy();
2388 } else {
2389 #ifdef COMPILER2
2390 soft_ref_policy = new LRUMaxHeapPolicy();
2391 #else
2392 soft_ref_policy = new LRUCurrentHeapPolicy();
2393 #endif // COMPILER2
2394 }
2395 assert(soft_ref_policy != NULL, "No soft reference policy");
2396 if (ref_processor()->processing_is_mt()) {
2397 RefProcTaskExecutor task_executor;
2398 ref_processor()->process_discovered_references(
2399 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
2400 &follow_stack_closure, &task_executor);
2401 } else {
2402 ref_processor()->process_discovered_references(
2403 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
2404 &follow_stack_closure, NULL);
2405 }
2406 }
2408 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
2409 // Follow system dictionary roots and unload classes.
2410 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2412 // Follow code cache roots.
2413 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
2414 purged_class);
2415 follow_stack(cm); // Flush marking stack.
2417 // Update subklass/sibling/implementor links of live klasses
2418 // revisit_klass_stack is used in follow_weak_klass_links().
2419 follow_weak_klass_links(cm);
2421 // Visit symbol and interned string tables and delete unmarked oops
2422 SymbolTable::unlink(is_alive_closure());
2423 StringTable::unlink(is_alive_closure());
2425 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
2426 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
2427 }
2429 // This should be moved to the shared markSweep code!
2430 class PSAlwaysTrueClosure: public BoolObjectClosure {
2431 public:
2432 void do_object(oop p) { ShouldNotReachHere(); }
2433 bool do_object_b(oop p) { return true; }
2434 };
2435 static PSAlwaysTrueClosure always_true;
2437 void PSParallelCompact::adjust_roots() {
2438 // Adjust the pointers to reflect the new locations
2439 EventMark m("3 adjust roots");
2440 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
2442 // General strong roots.
2443 Universe::oops_do(adjust_root_pointer_closure());
2444 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
2445 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
2446 Threads::oops_do(adjust_root_pointer_closure());
2447 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
2448 FlatProfiler::oops_do(adjust_root_pointer_closure());
2449 Management::oops_do(adjust_root_pointer_closure());
2450 JvmtiExport::oops_do(adjust_root_pointer_closure());
2451 // SO_AllClasses
2452 SystemDictionary::oops_do(adjust_root_pointer_closure());
2453 vmSymbols::oops_do(adjust_root_pointer_closure());
2455 // Now adjust pointers in remaining weak roots. (All of which should
2456 // have been cleared if they pointed to non-surviving objects.)
2457 // Global (weak) JNI handles
2458 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
2460 CodeCache::oops_do(adjust_pointer_closure());
2461 SymbolTable::oops_do(adjust_root_pointer_closure());
2462 StringTable::oops_do(adjust_root_pointer_closure());
2463 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
2464 // Roots were visited so references into the young gen in roots
2465 // may have been scanned. Process them also.
2466 // Should the reference processor have a span that excludes
2467 // young gen objects?
2468 PSScavenge::reference_processor()->weak_oops_do(
2469 adjust_root_pointer_closure());
2470 }
2472 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
2473 EventMark m("4 compact perm");
2474 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
2475 // trace("4");
2477 gc_heap()->perm_gen()->start_array()->reset();
2478 move_and_update(cm, perm_space_id);
2479 }
2481 void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
2482 uint parallel_gc_threads) {
2483 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2485 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
2486 for (unsigned int j = 0; j < task_count; j++) {
2487 q->enqueue(new DrainStacksCompactionTask());
2488 }
2490 // Find all chunks that are available (can be filled immediately) and
2491 // distribute them to the thread stacks. The iteration is done in reverse
2492 // order (high to low) so the chunks will be removed in ascending order.
2494 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2496 size_t fillable_chunks = 0; // A count for diagnostic purposes.
2497 unsigned int which = 0; // The worker thread number.
2499 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2500 SpaceInfo* const space_info = _space_info + id;
2501 MutableSpace* const space = space_info->space();
2502 HeapWord* const new_top = space_info->new_top();
2504 const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
2505 const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
2506 assert(end_chunk > 0, "perm gen cannot be empty");
2508 for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
2509 if (sd.chunk(cur)->claim_unsafe()) {
2510 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
2511 cm->save_for_processing(cur);
2513 if (TraceParallelOldGCCompactionPhase && Verbose) {
2514 const size_t count_mod_8 = fillable_chunks & 7;
2515 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2516 gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
2517 if (count_mod_8 == 7) gclog_or_tty->cr();
2518 }
2520 NOT_PRODUCT(++fillable_chunks;)
2522 // Assign chunks to threads in round-robin fashion.
2523 if (++which == task_count) {
2524 which = 0;
2525 }
2526 }
2527 }
2528 }
2530 if (TraceParallelOldGCCompactionPhase) {
2531 if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
2532 gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks);
2533 }
2534 }
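// When the dense prefix is updated in parallel, its chunks are split into at
// most parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING tasks so
// the work can be balanced dynamically across the GC threads (see
// enqueue_dense_prefix_tasks()).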
2536 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2538 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2539 uint parallel_gc_threads) {
2540 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2542 ParallelCompactData& sd = PSParallelCompact::summary_data();
2544 // Iterate over all the spaces adding tasks for updating
2545 // chunks in the dense prefix. Assume that 1 gc thread
2546 // will work on opening the gaps and the remaining gc threads
2547 // will work on the dense prefix.
2548 SpaceId space_id = old_space_id;
2549 while (space_id != last_space_id) {
2550 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2551 const MutableSpace* const space = _space_info[space_id].space();
2553 if (dense_prefix_end == space->bottom()) {
2554 // There is no dense prefix for this space.
2555 space_id = next_compaction_space_id(space_id);
2556 continue;
2557 }
2559 // The dense prefix is before this chunk.
2560 size_t chunk_index_end_dense_prefix =
2561 sd.addr_to_chunk_idx(dense_prefix_end);
2562 ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
2563 assert(dense_prefix_end == space->end() ||
2564 dense_prefix_cp->available() ||
2565 dense_prefix_cp->claimed(),
2566 "The chunk after the dense prefix should always be ready to fill");
2568 size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
2570 // Is there dense prefix work?
2571 size_t total_dense_prefix_chunks =
2572 chunk_index_end_dense_prefix - chunk_index_start;
2573 // How many chunks of the dense prefix should be given to
2574 // each thread?
2575 if (total_dense_prefix_chunks > 0) {
2576 uint tasks_for_dense_prefix = 1;
2577 if (UseParallelDensePrefixUpdate) {
2578 if (total_dense_prefix_chunks <=
2579 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2580 // Don't over partition. This assumes that
2581 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2582 // so there are not many chunks to process.
2583 tasks_for_dense_prefix = parallel_gc_threads;
2584 } else {
2585 // Over partition
2586 tasks_for_dense_prefix = parallel_gc_threads *
2587 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2588 }
2589 }
2590 size_t chunks_per_thread = total_dense_prefix_chunks /
2591 tasks_for_dense_prefix;
2592 // Give each thread at least 1 chunk.
2593 if (chunks_per_thread == 0) {
2594 chunks_per_thread = 1;
2595 }
2597 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2598 if (chunk_index_start >= chunk_index_end_dense_prefix) {
2599 break;
2600 }
2601 // chunk_index_end is not processed
2602 size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
2603 chunk_index_end_dense_prefix);
2604 q->enqueue(new UpdateDensePrefixTask(
2605 space_id,
2606 chunk_index_start,
2607 chunk_index_end));
2608 chunk_index_start = chunk_index_end;
2609 }
2610 }
2611 // This gets any part of the dense prefix that did not
2612 // fit evenly.
2613 if (chunk_index_start < chunk_index_end_dense_prefix) {
2614 q->enqueue(new UpdateDensePrefixTask(
2615 space_id,
2616 chunk_index_start,
2617 chunk_index_end_dense_prefix));
2618 }
2619 space_id = next_compaction_space_id(space_id);
2620 } // End tasks for dense prefix
2621 }
2623 void PSParallelCompact::enqueue_chunk_stealing_tasks(
2624 GCTaskQueue* q,
2625 ParallelTaskTerminator* terminator_ptr,
2626 uint parallel_gc_threads) {
2627 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2629 // Once a thread has drained its stack, it should try to steal chunks from
2630 // other threads.
2631 if (parallel_gc_threads > 1) {
2632 for (uint j = 0; j < parallel_gc_threads; j++) {
2633 q->enqueue(new StealChunkCompactionTask(terminator_ptr));
2634 }
2635 }
2636 }
2638 void PSParallelCompact::compact() {
2639 EventMark m("5 compact");
2640 // trace("5");
2641 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2643 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2644 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2645 PSOldGen* old_gen = heap->old_gen();
2646 old_gen->start_array()->reset();
2647 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2648 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
2649 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2651 GCTaskQueue* q = GCTaskQueue::create();
2652 enqueue_chunk_draining_tasks(q, parallel_gc_threads);
2653 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
2654 enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);
2656 {
2657 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2659 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2660 q->enqueue(fin);
2662 gc_task_manager()->add_list(q);
2664 fin->wait_for();
2666 // We have to release the barrier tasks!
2667 WaitForBarrierGCTask::destroy(fin);
2669 #ifdef ASSERT
2670 // Verify that all chunks have been processed before the deferred updates.
2671 // Note that perm_space_id is skipped; this type of verification is not
2672 // valid until the perm gen is compacted by chunks.
2673 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2674 verify_complete(SpaceId(id));
2675 }
2676 #endif
2677 }
2679 {
2680 // Update the deferred objects, if any. Any compaction manager can be used.
2681 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2682 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2683 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2684 update_deferred_objects(cm, SpaceId(id));
2685 }
2686 }
2687 }
2689 #ifdef ASSERT
2690 void PSParallelCompact::verify_complete(SpaceId space_id) {
2691 // All Chunks between space bottom() and new_top() should be marked as filled
2692 // and all Chunks between new_top() and top() should be available (i.e.,
2693 // should have been emptied).
2694 ParallelCompactData& sd = summary_data();
2695 SpaceInfo si = _space_info[space_id];
2696 HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
2697 HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
2698 const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
2699 const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
2700 const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);
2702 bool issued_a_warning = false;
2704 size_t cur_chunk;
2705 for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
2706 const ChunkData* const c = sd.chunk(cur_chunk);
2707 if (!c->completed()) {
2708 warning("chunk " SIZE_FORMAT " not filled: "
2709 "destination_count=" SIZE_FORMAT,
2710 cur_chunk, c->destination_count());
2711 issued_a_warning = true;
2712 }
2713 }
2715 for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
2716 const ChunkData* const c = sd.chunk(cur_chunk);
2717 if (!c->available()) {
2718 warning("chunk " SIZE_FORMAT " not empty: "
2719 "destination_count=" SIZE_FORMAT,
2720 cur_chunk, c->destination_count());
2721 issued_a_warning = true;
2722 }
2723 }
2725 if (issued_a_warning) {
2726 print_chunk_ranges();
2727 }
2728 }
2729 #endif // #ifdef ASSERT
2731 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
2732 EventMark m("5 compact serial");
2733 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
2735 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2736 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2738 PSYoungGen* young_gen = heap->young_gen();
2739 PSOldGen* old_gen = heap->old_gen();
2741 old_gen->start_array()->reset();
2742 old_gen->move_and_update(cm);
2743 young_gen->move_and_update(cm);
2744 }
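// Drain the marking work: objects on the overflow stack are processed first,
// then the local marking stack is emptied. Following an object's contents may
// push more work onto the stacks, so both loops re-check on every iteration.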
2747 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
2748 while (!cm->overflow_stack()->is_empty()) {
2749 oop obj = cm->overflow_stack()->pop();
2750 obj->follow_contents(cm);
2751 }
2753 oop obj;
2754 // Note: obj is an out parameter, set by pop_local().
2755 while (cm->marking_stack()->pop_local(obj)) {
2756 // It would be nice to assert about the type of objects we might
2757 // pop, but they can come from anywhere, unfortunately.
2758 obj->follow_contents(cm);
2759 }
2760 }
2762 void
2763 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
2764 // All klasses on the revisit stack are marked at this point.
2765 // Update and follow all subklass, sibling and implementor links.
2766 for (uint i = 0; i < ParallelGCThreads+1; i++) {
2767 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2768 KeepAliveClosure keep_alive_closure(cm);
2769 for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
2770 cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
2771 is_alive_closure(),
2772 &keep_alive_closure);
2773 }
2774 follow_stack(cm);
2775 }
2776 }
2778 void
2779 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2780 cm->revisit_klass_stack()->push(k);
2781 }
2783 #ifdef VALIDATE_MARK_SWEEP
2785 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
2786 if (!ValidateMarkSweep)
2787 return;
2789 if (!isroot) {
2790 if (_pointer_tracking) {
2791 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2792 _adjusted_pointers->remove(p);
2793 }
2794 } else {
2795 ptrdiff_t index = _root_refs_stack->find(p);
2796 if (index != -1) {
2797 int l = _root_refs_stack->length();
2798 if (l > 0 && l - 1 != index) {
2799 void* last = _root_refs_stack->pop();
2800 assert(last != p, "should be different");
2801 _root_refs_stack->at_put(index, last);
2802 } else {
2803 _root_refs_stack->remove(p);
2804 }
2805 }
2806 }
2807 }
2810 void PSParallelCompact::check_adjust_pointer(void* p) {
2811 _adjusted_pointers->push(p);
2812 }
2815 class AdjusterTracker: public OopClosure {
2816 public:
2817 AdjusterTracker() {}
2818 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
2819 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
2820 };
2823 void PSParallelCompact::track_interior_pointers(oop obj) {
2824 if (ValidateMarkSweep) {
2825 _adjusted_pointers->clear();
2826 _pointer_tracking = true;
2828 AdjusterTracker checker;
2829 obj->oop_iterate(&checker);
2830 }
2831 }
2834 void PSParallelCompact::check_interior_pointers() {
2835 if (ValidateMarkSweep) {
2836 _pointer_tracking = false;
2837 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2838 }
2839 }
2842 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2843 if (ValidateMarkSweep) {
2844 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2845 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2846 }
2847 }
2850 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2851 if (ValidateMarkSweep) {
2852 _live_oops->push(p);
2853 _live_oops_size->push(size);
2854 _live_oops_index++;
2855 }
2856 }
2858 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2859 if (ValidateMarkSweep) {
2860 oop obj = _live_oops->at((int)_live_oops_index);
2861 guarantee(obj == p, "should be the same object");
2862 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2863 _live_oops_index++;
2864 }
2865 }
2867 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2868 HeapWord* compaction_top) {
2869 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2870 "should be moved to forwarded location");
2871 if (ValidateMarkSweep) {
2872 PSParallelCompact::validate_live_oop(oop(q), size);
2873 _live_oops_moved_to->push(oop(compaction_top));
2874 }
2875 if (RecordMarkSweepCompaction) {
2876 _cur_gc_live_oops->push(q);
2877 _cur_gc_live_oops_moved_to->push(compaction_top);
2878 _cur_gc_live_oops_size->push(size);
2879 }
2880 }
2883 void PSParallelCompact::compaction_complete() {
2884 if (RecordMarkSweepCompaction) {
2885 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
2886 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2887 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
2889 _cur_gc_live_oops = _last_gc_live_oops;
2890 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
2891 _cur_gc_live_oops_size = _last_gc_live_oops_size;
2892 _last_gc_live_oops = _tmp_live_oops;
2893 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2894 _last_gc_live_oops_size = _tmp_live_oops_size;
2895 }
2896 }
2899 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2900 if (!RecordMarkSweepCompaction) {
2901 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2902 return;
2903 }
2905 if (_last_gc_live_oops == NULL) {
2906 tty->print_cr("No compaction information gathered yet");
2907 return;
2908 }
2910 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2911 HeapWord* old_oop = _last_gc_live_oops->at(i);
2912 size_t sz = _last_gc_live_oops_size->at(i);
2913 if (old_oop <= q && q < (old_oop + sz)) {
2914 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2915 size_t offset = (q - old_oop);
2916 tty->print_cr("Address " PTR_FORMAT, q);
2917 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
2918 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2919 return;
2920 }
2921 }
2923 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2924 }
2925 #endif //VALIDATE_MARK_SWEEP
2927 // Update interior oops in the range of chunks [beg_chunk, end_chunk).
2928 void
2929 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2930 SpaceId space_id,
2931 size_t beg_chunk,
2932 size_t end_chunk) {
2933 ParallelCompactData& sd = summary_data();
2934 ParMarkBitMap* const mbm = mark_bitmap();
2936 HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
2937 HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
2938 assert(beg_chunk <= end_chunk, "bad chunk range");
2939 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2941 #ifdef ASSERT
2942 // Claim the chunks to avoid triggering an assert when they are marked as
2943 // filled.
2944 for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
2945 assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
2946 }
2947 #endif // #ifdef ASSERT
2949 if (beg_addr != space(space_id)->bottom()) {
2950 // Find the first live object or block of dead space that *starts* in this
2951 // range of chunks. If a partial object crosses onto the chunk, skip it; it
2952 // will be marked for 'deferred update' when the object head is processed.
2953 // If dead space crosses onto the chunk, it is also skipped; it will be
2954 // filled when the prior chunk is processed. If neither of those apply, the
2955 // first word in the chunk is the start of a live object or dead space.
2956 assert(beg_addr > space(space_id)->bottom(), "sanity");
2957 const ChunkData* const cp = sd.chunk(beg_chunk);
2958 if (cp->partial_obj_size() != 0) {
2959 beg_addr = sd.partial_obj_end(beg_chunk);
2960 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2961 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2962 }
2963 }
2965 if (beg_addr < end_addr) {
2966 // A live object or block of dead space starts in this range of chunks.
2967 HeapWord* const dense_prefix_end = dense_prefix(space_id);
2969 // Create closures and iterate.
2970 UpdateOnlyClosure update_closure(mbm, cm, space_id);
2971 FillClosure fill_closure(cm, space_id);
2972 ParMarkBitMap::IterationStatus status;
2973 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2974 dense_prefix_end);
2975 if (status == ParMarkBitMap::incomplete) {
2976 update_closure.do_addr(update_closure.source());
2977 }
2978 }
2980 // Mark the chunks as filled.
2981 ChunkData* const beg_cp = sd.chunk(beg_chunk);
2982 ChunkData* const end_cp = sd.chunk(end_chunk);
2983 for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
2984 cp->set_completed();
2985 }
2986 }
2988 // Return the SpaceId for the space containing addr. If addr is not in the
2989 // heap, last_space_id is returned. In debug mode it expects the address to be
2990 // in the heap and asserts such.
2991 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2992 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
2994 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
2995 if (_space_info[id].space()->contains(addr)) {
2996 return SpaceId(id);
2997 }
2998 }
3000 assert(false, "no space contains the addr");
3001 return last_space_id;
3002 }
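
// Update the deferred objects in the space: for each chunk between the dense
// prefix and new_top that recorded a deferred object address, register the
// object with the start array (if any) and update its interior oops now that
// the whole object has been copied.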
3004 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
3005 SpaceId id) {
3006 assert(id < last_space_id, "bad space id");
3008 ParallelCompactData& sd = summary_data();
3009 const SpaceInfo* const space_info = _space_info + id;
3010 ObjectStartArray* const start_array = space_info->start_array();
3012 const MutableSpace* const space = space_info->space();
3013 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
3014 HeapWord* const beg_addr = space_info->dense_prefix();
3015 HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());
3017 const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
3018 const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
3019 const ChunkData* cur_chunk;
3020 for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
3021 HeapWord* const addr = cur_chunk->deferred_obj_addr();
3022 if (addr != NULL) {
3023 if (start_array != NULL) {
3024 start_array->allocate_block(addr);
3025 }
3026 oop(addr)->update_contents(cm);
3027 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
3028 }
3029 }
3030 }
3032 // Skip over count live words starting from beg, and return the address of the
3033 // next live word. Unless marked, the word corresponding to beg is assumed to
3034 // be dead. Callers must either ensure beg does not correspond to the middle of
3035 // an object, or account for those live words in some other way. Callers must
3036 // also ensure that there are enough live words in the range [beg, end) to skip.
3037 HeapWord*
3038 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
3039 {
3040 assert(count > 0, "sanity");
3042 ParMarkBitMap* m = mark_bitmap();
3043 idx_t bits_to_skip = m->words_to_bits(count);
3044 idx_t cur_beg = m->addr_to_bit(beg);
3045 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
3047 do {
3048 cur_beg = m->find_obj_beg(cur_beg, search_end);
3049 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
3050 const size_t obj_bits = cur_end - cur_beg + 1;
3051 if (obj_bits > bits_to_skip) {
3052 return m->bit_to_addr(cur_beg + bits_to_skip);
3053 }
3054 bits_to_skip -= obj_bits;
3055 cur_beg = cur_end + 1;
3056 } while (bits_to_skip > 0);
3058 // Skipping the desired number of words landed just past the end of an object.
3059 // Find the start of the next object.
3060 cur_beg = m->find_obj_beg(cur_beg, search_end);
3061 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
3062 return m->bit_to_addr(cur_beg);
3063 }
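
// Return the address of the first source word that should be copied to
// dest_addr from chunk src_chunk_idx. If dest_addr is the chunk's destination,
// this is simply the first live word in the chunk; otherwise skip over the
// live words (partial object and/or whole objects) destined below dest_addr.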
3065 HeapWord*
3066 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
3067 size_t src_chunk_idx)
3068 {
3069 ParMarkBitMap* const bitmap = mark_bitmap();
3070 const ParallelCompactData& sd = summary_data();
3071 const size_t ChunkSize = ParallelCompactData::ChunkSize;
3073 assert(sd.is_chunk_aligned(dest_addr), "not aligned");
3075 const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
3076 const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
3077 HeapWord* const src_chunk_destination = src_chunk_ptr->destination();
3079 assert(dest_addr >= src_chunk_destination, "wrong src chunk");
3080 assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");
3082 HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
3083 HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;
3085 HeapWord* addr = src_chunk_beg;
3086 if (dest_addr == src_chunk_destination) {
3087 // Return the first live word in the source chunk.
3088 if (partial_obj_size == 0) {
3089 addr = bitmap->find_obj_beg(addr, src_chunk_end);
3090 assert(addr < src_chunk_end, "no objects start in src chunk");
3091 }
3092 return addr;
3093 }
3095 // Must skip some live data.
3096 size_t words_to_skip = dest_addr - src_chunk_destination;
3097 assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");
3099 if (partial_obj_size >= words_to_skip) {
3100 // All the live words to skip are part of the partial object.
3101 addr += words_to_skip;
3102 if (partial_obj_size == words_to_skip) {
3103 // Find the first live word past the partial object.
3104 addr = bitmap->find_obj_beg(addr, src_chunk_end);
3105 assert(addr < src_chunk_end, "wrong src chunk");
3106 }
3107 return addr;
3108 }
3110 // Skip over the partial object (if any).
3111 if (partial_obj_size != 0) {
3112 words_to_skip -= partial_obj_size;
3113 addr += partial_obj_size;
3114 }
3116 // Skip over live words due to objects that start in the chunk.
3117 addr = skip_live_words(addr, src_chunk_end, words_to_skip);
3118 assert(addr < src_chunk_end, "wrong src chunk");
3119 return addr;
3120 }
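
// Decrement the destination count of each chunk in the range [beg_chunk,
// chunk containing end_addr). A chunk that becomes available as a result and
// can be claimed is queued with the compaction manager for filling.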
3122 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
3123 size_t beg_chunk,
3124 HeapWord* end_addr)
3125 {
3126 ParallelCompactData& sd = summary_data();
3127 ChunkData* const beg = sd.chunk(beg_chunk);
3128 HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
3129 ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
3130 size_t cur_idx = beg_chunk;
3131 for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
3132 assert(cur->data_size() > 0, "chunk must have live data");
3133 cur->decrement_destination_count();
3134 if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
3135 cm->save_for_processing(cur_idx);
3136 }
3137 }
3138 }
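
// Find the next source chunk containing live data to be copied to the
// closure's destination, skipping empty chunks and switching to the next
// source space when necessary. On a space switch, src_space_id and
// src_space_top are updated; the closure's source address is advanced to the
// start of the returned chunk when necessary.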
3140 size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
3141 SpaceId& src_space_id,
3142 HeapWord*& src_space_top,
3143 HeapWord* end_addr)
3144 {
3145 typedef ParallelCompactData::ChunkData ChunkData;
3147 ParallelCompactData& sd = PSParallelCompact::summary_data();
3148 const size_t chunk_size = ParallelCompactData::ChunkSize;
3150 size_t src_chunk_idx = 0;
3152 // Skip empty chunks (if any) up to the top of the space.
3153 HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
3154 ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
3155 HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
3156 const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
3157 while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
3158 ++src_chunk_ptr;
3159 }
3161 if (src_chunk_ptr < top_chunk_ptr) {
3162 // The next source chunk is in the current space. Update src_chunk_idx and
3163 // the source address to match src_chunk_ptr.
3164 src_chunk_idx = sd.chunk(src_chunk_ptr);
3165 HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
3166 if (src_chunk_addr > closure.source()) {
3167 closure.set_source(src_chunk_addr);
3168 }
3169 return src_chunk_idx;
3170 }
3172 // Switch to a new source space and find the first non-empty chunk.
3173 unsigned int space_id = src_space_id + 1;
3174 assert(space_id < last_space_id, "not enough spaces");
3176 HeapWord* const destination = closure.destination();
3178 do {
3179 MutableSpace* space = _space_info[space_id].space();
3180 HeapWord* const bottom = space->bottom();
3181 const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);
3183 // Iterate over the spaces that do not compact into themselves.
3184 if (bottom_cp->destination() != bottom) {
3185 HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
3186 const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
3188 for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
3189 if (src_cp->live_obj_size() > 0) {
3190 // Found it.
3191 assert(src_cp->destination() == destination,
3192 "first live obj in the space must match the destination");
3193 assert(src_cp->partial_obj_size() == 0,
3194 "a space cannot begin with a partial obj");
3196 src_space_id = SpaceId(space_id);
3197 src_space_top = space->top();
3198 const size_t src_chunk_idx = sd.chunk(src_cp);
3199 closure.set_source(sd.chunk_to_addr(src_chunk_idx));
3200 return src_chunk_idx;
3201 } else {
3202 assert(src_cp->data_size() == 0, "sanity");
3203 }
3204 }
3205 }
3206 } while (++space_id < last_space_id);
3208 assert(false, "no source chunk was found");
3209 return 0;
3210 }
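
// Fill destination chunk chunk_idx with live data, moving objects to their
// new locations and updating their interior oops. The data may come from
// several source chunks; destination counts are decremented as each source
// chunk is exhausted, and an object that does not fit entirely in the chunk
// has its interior oop updates deferred.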
3212 void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
3213 {
3214 typedef ParMarkBitMap::IterationStatus IterationStatus;
3215 const size_t ChunkSize = ParallelCompactData::ChunkSize;
3216 ParMarkBitMap* const bitmap = mark_bitmap();
3217 ParallelCompactData& sd = summary_data();
3218 ChunkData* const chunk_ptr = sd.chunk(chunk_idx);
3220 // Get the items needed to construct the closure.
3221 HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
3222 SpaceId dest_space_id = space_id(dest_addr);
3223 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
3224 HeapWord* new_top = _space_info[dest_space_id].new_top();
3225 assert(dest_addr < new_top, "sanity");
3226 const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);
3228 // Get the source chunk and related info.
3229 size_t src_chunk_idx = chunk_ptr->source_chunk();
3230 SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
3231 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
3233 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3234 closure.set_source(first_src_addr(dest_addr, src_chunk_idx));
3236 // Adjust src_chunk_idx to prepare for decrementing destination counts (the
3237 // destination count is not decremented when a chunk is copied to itself).
3238 if (src_chunk_idx == chunk_idx) {
3239 src_chunk_idx += 1;
3240 }
3242 if (bitmap->is_unmarked(closure.source())) {
3243 // The first source word is in the middle of an object; copy the remainder
3244 // of the object or as much as will fit. The fact that pointer updates were
3245 // deferred will be noted when the object header is processed.
3246 HeapWord* const old_src_addr = closure.source();
3247 closure.copy_partial_obj();
3248 if (closure.is_full()) {
3249 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3250 chunk_ptr->set_deferred_obj_addr(NULL);
3251 chunk_ptr->set_completed();
3252 return;
3253 }
3255 HeapWord* const end_addr = sd.chunk_align_down(closure.source());
3256 if (sd.chunk_align_down(old_src_addr) != end_addr) {
3257 // The partial object was copied from more than one source chunk.
3258 decrement_destination_counts(cm, src_chunk_idx, end_addr);
3260 // Move to the next source chunk, possibly switching spaces as well. All
3261 // args except end_addr may be modified.
3262 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
3263 end_addr);
3264 }
3265 }
3267 do {
3268 HeapWord* const cur_addr = closure.source();
3269 HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
3270 src_space_top);
3271 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3273 if (status == ParMarkBitMap::incomplete) {
3274 // The last obj that starts in the source chunk does not end in the chunk.
3275 assert(closure.source() < end_addr, "sanity");
3276 HeapWord* const obj_beg = closure.source();
3277 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3278 src_space_top);
3279 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3280 if (obj_end < range_end) {
3281 // The end was found; the entire object will fit.
3282 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
3283 assert(status != ParMarkBitMap::would_overflow, "sanity");
3284 } else {
3285 // The end was not found; the object will not fit.
3286 assert(range_end < src_space_top, "obj cannot cross space boundary");
3287 status = ParMarkBitMap::would_overflow;
3288 }
3289 }
3291 if (status == ParMarkBitMap::would_overflow) {
3292 // The last object did not fit. Note that interior oop updates were
3293 // deferred, then copy enough of the object to fill the chunk.
3294 chunk_ptr->set_deferred_obj_addr(closure.destination());
3295 status = closure.copy_until_full(); // copies from closure.source()
3297 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3298 chunk_ptr->set_completed();
3299 return;
3300 }
3302 if (status == ParMarkBitMap::full) {
3303 decrement_destination_counts(cm, src_chunk_idx, closure.source());
3304 chunk_ptr->set_deferred_obj_addr(NULL);
3305 chunk_ptr->set_completed();
3306 return;
3307 }
3309 decrement_destination_counts(cm, src_chunk_idx, end_addr);
3311 // Move to the next source chunk, possibly switching spaces as well. All
3312 // args except end_addr may be modified.
3313 src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
3314 end_addr);
3315 } while (true);
3316 }
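
// Move and update the live objects in the given space: pointers in the dense
// prefix are updated in place (the objects there do not move), then the
// remaining live data is moved to its new location and updated. The
// ASSERT-only verify and reset modes iterate without moving anything.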
3318 void
3319 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3320 const MutableSpace* sp = space(space_id);
3321 if (sp->is_empty()) {
3322 return;
3323 }
3325 ParallelCompactData& sd = PSParallelCompact::summary_data();
3326 ParMarkBitMap* const bitmap = mark_bitmap();
3327 HeapWord* const dp_addr = dense_prefix(space_id);
3328 HeapWord* beg_addr = sp->bottom();
3329 HeapWord* end_addr = sp->top();
3331 #ifdef ASSERT
3332 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
3333 if (cm->should_verify_only()) {
3334 VerifyUpdateClosure verify_update(cm, sp);
3335 bitmap->iterate(&verify_update, beg_addr, end_addr);
3336 return;
3337 }
3339 if (cm->should_reset_only()) {
3340 ResetObjectsClosure reset_objects(cm);
3341 bitmap->iterate(&reset_objects, beg_addr, end_addr);
3342 return;
3343 }
3344 #endif
3346 const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
3347 const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
3348 if (beg_chunk < dp_chunk) {
3349 update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
3350 }
3352 // The destination of the first live object that starts in the chunk is one
3353 // past the end of the partial object entering the chunk (if any).
3354 HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
3355 HeapWord* const new_top = _space_info[space_id].new_top();
3356 assert(new_top >= dest_addr, "bad new_top value");
3357 const size_t words = pointer_delta(new_top, dest_addr);
3359 if (words > 0) {
3360 ObjectStartArray* start_array = _space_info[space_id].start_array();
3361 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3363 ParMarkBitMap::IterationStatus status;
3364 status = bitmap->iterate(&closure, dest_addr, end_addr);
3365 assert(status == ParMarkBitMap::full, "iteration not complete");
3366 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3367 "live objects skipped because closure is full");
3368 }
3369 }
3371 jlong PSParallelCompact::millis_since_last_gc() {
3372 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
3373 // XXX See note in genCollectedHeap::millis_since_last_gc().
3374 if (ret_val < 0) {
3375 NOT_PRODUCT(warning("time warp: %d", ret_val);)
3376 return 0;
3377 }
3378 return ret_val;
3379 }
3381 void PSParallelCompact::reset_millis_since_last_gc() {
3382 _time_of_last_gc = os::javaTimeMillis();
3383 }
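
// Copy the words remaining in this closure from source() to destination()
// and mark the closure full. Used when the object being copied extends past
// the end of the destination chunk.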
3385 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3386 {
3387 if (source() != destination()) {
3388 assert(source() > destination(), "must copy to the left");
3389 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3390 }
3391 update_state(words_remaining());
3392 assert(is_full(), "sanity");
3393 return ParMarkBitMap::full;
3394 }
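
// Copy the portion of a partial object (one that started in an earlier
// chunk) beginning at source(): copy up to the end of the object or until
// this closure is full, whichever comes first.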
3396 void MoveAndUpdateClosure::copy_partial_obj()
3397 {
3398 size_t words = words_remaining();
3400 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3401 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3402 if (end_addr < range_end) {
3403 words = bitmap()->obj_size(source(), end_addr);
3404 }
3406 // This test is necessary; if omitted, the pointer updates to a partial object
3407 // that crosses the dense prefix boundary could be overwritten.
3408 if (source() != destination()) {
3409 assert(source() > destination(), "must copy to the left");
3410 Copy::aligned_conjoint_words(source(), destination(), words);
3411 }
3412 update_state(words);
3413 }
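
// Move the object at addr to destination() and update its interior oops.
// Returns would_overflow if the object does not fit in the words remaining,
// full when the destination is exactly filled, and incomplete otherwise.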
3415 ParMarkBitMapClosure::IterationStatus
3416 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3417 assert(destination() != NULL, "sanity");
3418 assert(bitmap()->obj_size(addr) == words, "bad size");
3420 _source = addr;
3421 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3422 destination(), "wrong destination");
3424 if (words > words_remaining()) {
3425 return ParMarkBitMap::would_overflow;
3426 }
3428 // The start_array must be updated even if the object is not moving.
3429 if (_start_array != NULL) {
3430 _start_array->allocate_block(destination());
3431 }
3433 if (destination() != source()) {
3434 assert(destination() < source(), "must copy to the left");
3435 Copy::aligned_conjoint_words(source(), destination(), words);
3436 }
3438 oop moved_oop = (oop) destination();
3439 moved_oop->update_contents(compaction_manager());
3440 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
3442 update_state(words);
3443 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3444 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3445 }
3447 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3448 ParCompactionManager* cm,
3449 PSParallelCompact::SpaceId space_id) :
3450 ParMarkBitMapClosure(mbm, cm),
3451 _space_id(space_id),
3452 _start_array(PSParallelCompact::start_array(space_id))
3453 {
3454 }
3456 // Updates the references in the object to their new values.
3457 ParMarkBitMapClosure::IterationStatus
3458 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3459 do_addr(addr);
3460 return ParMarkBitMap::incomplete;
3461 }
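
// Closure used while building the block table: it accumulates the amount of
// live data to the left of the current block within a chunk and records the
// per-block offsets and bit parity in the summary data.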
3463 BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
3464 ParCompactionManager* cm,
3465 size_t chunk_index) :
3466 ParMarkBitMapClosure(mbm, cm),
3467 _live_data_left(0),
3468 _cur_block(0) {
3469 _chunk_start =
3470 PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
3471 _chunk_end =
3472 PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
3473 ParallelCompactData::ChunkSize;
3474 _chunk_index = chunk_index;
3475 _cur_block =
3476 PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
3477 }
3479 bool BitBlockUpdateClosure::chunk_contains_cur_block() {
3480 return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
3481 }
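
// Prepare the closure for a new chunk: reset the live data count, recompute
// the chunk boundaries, and set the offset and bit parity of the chunk's
// first block according to whether a partial object extends onto the chunk.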
3483 void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
3484 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
3485 ParallelCompactData& sd = PSParallelCompact::summary_data();
3486 _chunk_index = chunk_index;
3487 _live_data_left = 0;
3488 _chunk_start = sd.chunk_to_addr(chunk_index);
3489 _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;
3491 // The first block in this chunk
3492 size_t first_block = sd.addr_to_block_idx(_chunk_start);
3493 size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();
3495 // Set the offset to 0. By definition it should have that value
3496 // but it may have been written while processing an earlier chunk.
3497 if (partial_live_size == 0) {
3498 // No live object extends onto the chunk. The first bit
3499 // in the bit map for the first chunk must be a start bit.
3500 // Although there may not be any marked bits, it is safe
3501 // to set it as a start bit.
3502 sd.block(first_block)->set_start_bit_offset(0);
3503 sd.block(first_block)->set_first_is_start_bit(true);
3504 } else if (sd.partial_obj_ends_in_block(first_block)) {
3505 sd.block(first_block)->set_end_bit_offset(0);
3506 sd.block(first_block)->set_first_is_start_bit(false);
3507 } else {
3508 // The partial object extends beyond the first block.
3509 // There is no object starting in the first block
3510 // so the offset and bit parity are not needed.
3511 // Set the bit parity to start bit so assertions
3512 // work when no bit is found.
3513 sd.block(first_block)->set_end_bit_offset(0);
3514 sd.block(first_block)->set_first_is_start_bit(false);
3515 }
3516 _cur_block = first_block;
3517 #ifdef ASSERT
3518 if (sd.block(first_block)->first_is_start_bit()) {
3519 assert(!sd.partial_obj_ends_in_block(first_block),
3520 "Partial object cannot end in first block");
3521 }
3523 if (PrintGCDetails && Verbose) {
3524 if (partial_live_size == 1) {
3525 gclog_or_tty->print_cr("first_block " PTR_FORMAT
3526 " _offset " PTR_FORMAT
3527 " _first_is_start_bit %d",
3528 first_block,
3529 sd.block(first_block)->raw_offset(),
3530 sd.block(first_block)->first_is_start_bit());
3531 }
3532 }
3533 #endif
3534 DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(17);)
3535 }
3537 // This method is called when an object has been found (both the beginning
3538 // and the end of the object) in the range of iteration. It calculates
3539 // the words of live data to the left of a block. That live
3540 // data includes any object starting to the left of the block (i.e.,
3541 // the live-data-to-the-left of block AAA will include the full size
3542 // of any object entering AAA).
3544 ParMarkBitMapClosure::IterationStatus
3545 BitBlockUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3546 // add the size to the block data.
3547 HeapWord* obj = addr;
3548 ParallelCompactData& sd = PSParallelCompact::summary_data();
3550 assert(bitmap()->obj_size(obj) == words, "bad size");
3551 assert(_chunk_start <= obj, "object is not in chunk");
3552 assert(obj + words <= _chunk_end, "object is not in chunk");
3554 // Update the live data to the left
3555 size_t prev_live_data_left = _live_data_left;
3556 _live_data_left = _live_data_left + words;
3558 // Is this object in the current block?
3559 size_t block_of_obj = sd.addr_to_block_idx(obj);
3560 size_t block_of_obj_last = sd.addr_to_block_idx(obj + words - 1);
3561 HeapWord* block_of_obj_last_addr = sd.block_to_addr(block_of_obj_last);
3562 if (_cur_block < block_of_obj) {
3564 //
3565 // No object crossed the block boundary and this object was found
3566 // on the other side of the block boundary. Update the offset for
3567 // the new block with the data size that does not include this object.
3568 //
3569 // The first bit in block_of_obj is a start bit except in the
3570 // case where the partial object for the chunk extends into
3571 // this block.
3572 if (sd.partial_obj_ends_in_block(block_of_obj)) {
3573 sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
3574 } else {
3575 sd.block(block_of_obj)->set_start_bit_offset(prev_live_data_left);
3576 }
3578 // Does this object extend beyond its block?
3579 if (block_of_obj < block_of_obj_last) {
3580 // Object crosses block boundary. Two blocks need to be updated:
3581 // the current block where the object started
3582 // the block where the object ends
3583 //
3584 // The offset for blocks with no objects starting in them
3585 // (e.g., blocks between _cur_block and block_of_obj_last)
3586 // should not be needed.
3587 // Note that block_of_obj_last may be in another chunk. If so,
3588 // it should be overwritten later. This is a problem (writing
3589 // into a block in a later chunk) for parallel execution.
3590 assert(obj < block_of_obj_last_addr,
3591 "Object should start in previous block");
3593 // obj is crossing into block_of_obj_last so the first bit
3594 // is an end bit.
3595 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
3597 _cur_block = block_of_obj_last;
3598 } else {
3599 // _first_is_start_bit has already been set correctly
3600 // in the if-then-else above so don't reset it here.
3601 _cur_block = block_of_obj;
3602 }
3603 } else {
3604 // The current block only changes if the object extends beyond
3605 // the block it starts in.
3606 //
3607 // The object starts in the current block.
3608 // Does this object pass beyond the end of it?
3609 if (block_of_obj < block_of_obj_last) {
3610 // Object crosses block boundary.
3611 // See note above on possible blocks between block_of_obj and
3612 // block_of_obj_last
3613 assert(obj < block_of_obj_last_addr,
3614 "Object should start in previous block");
3616 sd.block(block_of_obj_last)->set_end_bit_offset(_live_data_left);
3618 _cur_block = block_of_obj_last;
3619 }
3620 }
3622 // Return incomplete if there are more blocks to be done.
3623 if (chunk_contains_cur_block()) {
3624 return ParMarkBitMap::incomplete;
3625 }
3626 return ParMarkBitMap::complete;
3627 }
3629 // Verify the new location using the forwarding pointer
3630 // from MarkSweep::mark_sweep_phase2(). Set the mark_word
3631 // to the initial value.
3632 ParMarkBitMapClosure::IterationStatus
3633 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3634 // The second arg (words) is not used.
3635 oop obj = (oop) addr;
3636 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
3637 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
3638 if (forwarding_ptr == NULL) {
3639 // The object is dead or not moving.
3640 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
3641 "Object liveness is wrong.");
3642 return ParMarkBitMap::incomplete;
3643 }
3644 assert(UseParallelOldGCDensePrefix ||
3645 (HeapMaximumCompactionInterval > 1) ||
3646 (MarkSweepAlwaysCompactCount > 1) ||
3647 (forwarding_ptr == new_pointer),
3648 "Calculation of new location is incorrect");
3649 return ParMarkBitMap::incomplete;
3650 }
3652 // Reset objects modified for debug checking.
3653 ParMarkBitMapClosure::IterationStatus
3654 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
3655 // The second arg (words) is not used.
3656 oop obj = (oop) addr;
3657 obj->init_mark();
3658 return ParMarkBitMap::incomplete;
3659 }
3661 // Prepare for compaction. This method is executed once
3662 // (i.e., by a single thread) before compaction.
3663 // Save the updated location of the intArrayKlassObj for
3664 // filling holes in the dense prefix.
3665 void PSParallelCompact::compact_prologue() {
3666 _updated_int_array_klass_obj = (klassOop)
3667 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3668 }
3670 // The initial implementation of this method created a field
3671 // _next_compaction_space_id in SpaceInfo and initialized
3672 // that field in SpaceInfo::initialize_space_info(). That
3673 // required that _next_compaction_space_id be declared a
3674 // SpaceId in SpaceInfo and that would have required that
3675 // either SpaceId be declared in a separate class or that
3676 // it be declared in SpaceInfo. It didn't seem consistent
3677 // to declare it in SpaceInfo (didn't really fit logically).
3678 // Alternatively, defining a separate class to define SpaceId
3679 // seemed excessive. This implementation is simple and localizes
3680 // the knowledge.
3682 PSParallelCompact::SpaceId
3683 PSParallelCompact::next_compaction_space_id(SpaceId id) {
3684 assert(id < last_space_id, "id out of range");
3685 switch (id) {
3686 case perm_space_id :
3687 return last_space_id;
3688 case old_space_id :
3689 return eden_space_id;
3690 case eden_space_id :
3691 return from_space_id;
3692 case from_space_id :
3693 return to_space_id;
3694 case to_space_id :
3695 return last_space_id;
3696 default:
3697 assert(false, "Bad space id");
3698 return last_space_id;
3699 }
3700 }
3702 // Here temporarily for debugging
3703 #ifdef ASSERT
3704 size_t ParallelCompactData::block_idx(BlockData* block) {
3705 size_t index = pointer_delta(block,
3706 PSParallelCompact::summary_data()._block_data, sizeof(BlockData));
3707 return index;
3708 }
3709 #endif