Tue, 07 Oct 2008 11:01:35 -0700
Merge
/*
 * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psParallelCompact.cpp.incl"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
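
// A sketch of the resulting bit layout (inferred from the shifts and masks
// above, assuming region_sz_t is a 32-bit unsigned type): the low 27 bits
// (los_mask) hold the live-obj size in words, and the top 5 bits hold the
// destination count and claim state.
//
//    31     27 26                          0
//   +---------+----------------------------+
//   | dc bits |       live obj size        |
//   +---------+----------------------------+
//
// dc_one is one destination-count increment; dc_claimed (0x8 << 27) and
// dc_completed (0xc << 27) sit well above any realistic count, so they also
// serve as claim-state sentinels.
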
SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
bool      PSParallelCompact::_print_phases = false;

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
klassOop            PSParallelCompact::_updated_int_array_klass_obj = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

#ifdef VALIDATE_MARK_SWEEP
GrowableArray<void*>*  PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops = NULL;
GrowableArray<oop>*    PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t                 PSParallelCompact::_live_oops_index = 0;
size_t                 PSParallelCompact::_live_oops_index_at_perm = 0;
GrowableArray<void*>*  PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<void*>*  PSParallelCompact::_adjusted_pointers = NULL;
bool                   PSParallelCompact::_pointer_tracking = false;
bool                   PSParallelCompact::_root_tracking = true;

GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_cur_gc_live_oops_size = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
GrowableArray<size_t>*    PSParallelCompact::_last_gc_live_oops_size = NULL;
#endif

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "perm", "old ", "eden", "from", "to  "
};

void PSParallelCompact::print_region_ranges()
{
  tty->print_cr("space  bottom     top        end        new_top");
  tty->print_cr("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    tty->print_cr("%u %s "
                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
                  id, space_names[id],
                  summary_data().addr_to_region_idx(space->bottom()),
                  summary_data().addr_to_region_idx(space->top()),
                  summary_data().addr_to_region_idx(space->end()),
                  summary_data().addr_to_region_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
{
#define REGION_IDX_FORMAT  SIZE_FORMAT_W(7)
#define REGION_DATA_FORMAT SIZE_FORMAT_W(5)

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
  tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
                REGION_IDX_FORMAT " " PTR_FORMAT " "
                REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
                REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
                i, c->data_location(), dci, c->destination(),
                c->partial_obj_size(), c->live_obj_size(),
                c->data_size(), c->source_region(), c->destination_count());

#undef REGION_IDX_FORMAT
#undef REGION_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_region_idx(beg_addr);
  const size_t last = summary_data.addr_to_region_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_region(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_region(size_t i,
                             const ParallelCompactData::RegionData* c,
                             bool newline = true)
{
  tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
             i, c->destination(),
             c->partial_obj_size(), c->live_obj_size(),
             c->data_size(), c->source_region(), c->destination_count());
  if (newline) tty->cr();
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t region_size = ParallelCompactData::RegionSize;
  typedef ParallelCompactData::RegionData RegionData;
  HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
  const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
  const RegionData* c = summary_data.region(end_region - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full regions at the beginning of the space.
  size_t full_region_count = 0;
  size_t i = summary_data.addr_to_region_idx(space->bottom());
  while (i < end_region && summary_data.region(i)->data_size() == region_size) {
    print_initial_summary_region(i, summary_data.region(i));
    ++full_region_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_region_count * region_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_region = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for regions while there is something live in
  // the region or to the right of it.  The remaining regions are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_region && live_to_right > 0) {
    c = summary_data.region(i);
    HeapWord* const region_addr = summary_data.region_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), region_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_region = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    print_initial_summary_region(i, c, false);
    tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
                  reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining regions are empty.  Print one more if there is one.
  if (i < end_region) {
    print_initial_summary_region(i, summary_data.region(i));
  }

  tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
                "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
                max_reclaimed_ratio_region, max_dead_to_right,
                max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  unsigned int id = PSParallelCompact::perm_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _region_vspace = 0;
  _region_data = 0;
  _region_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(region_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & RegionSizeOffsetMask) == 0,
         "region size not a multiple of RegionSize");

  bool result = initialize_region_data(region_size);

  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(bytes, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());
  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(bytes)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
}

bool ParallelCompactData::initialize_region_data(size_t region_size)
{
  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != 0) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  memset(_region_data, 0, _region_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
{
  const RegionData* cur_cp = region(region_idx);
  const RegionData* const end_cp = region(region_count() - 1);

  HeapWord* result = region_to_addr(region_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
  }
  return result;
}

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_region = obj_ofs >> Log2RegionSize;
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_region == end_region) {
    // All in one region.
    _region_data[beg_region].add_live_obj(len);
    return;
  }

  // First region.
  const size_t beg_ofs = region_offset(addr);
  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

  klassOop klass = ((oop)addr)->klass();
  // Middle regions--completely spanned by this object.
  for (size_t region = beg_region + 1; region < end_region; ++region) {
    _region_data[region].set_partial_obj_size(RegionSize);
    _region_data[region].set_partial_obj_addr(addr);
  }

  // Last region.
  const size_t end_ofs = region_offset(addr + len - 1);
  _region_data[end_region].set_partial_obj_size(end_ofs + 1);
  _region_data[end_region].set_partial_obj_addr(addr);
}
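
// Worked illustration of the split above (hypothetical numbers, with
// RegionSize == 512 words): an object of len 1100 starting at word offset 500
// spans regions 0 through 3.  Region 0 gains 12 live words (512 - 500),
// middle regions 1 and 2 each get partial_obj_size == 512, and region 3 gets
// partial_obj_size == 64 (the last word's offset 63, plus 1); the pieces sum
// to 12 + 512 + 512 + 64 == 1100.
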
void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(region_offset(beg) == 0, "not RegionSize aligned");
  assert(region_offset(end) == 0, "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);
    _region_data[cur_region].set_data_location(addr);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** target_next,
                                    HeapWord** source_next) {
  // This is too strict.
  // assert(region_offset(source_beg) == 0, "not RegionSize aligned");

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
                  target_beg, target_end,
                  source_beg, source_end,
                  target_next != 0 ? *target_next : (HeapWord*) 0,
                  source_next != 0 ? *source_next : (HeapWord*) 0);
  }

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
    size_t words = _region_data[cur_region].data_size();

#if 1
    assert(pointer_delta(target_end, dest_addr) >= words,
           "source region does not fit into target region");
#else
    // XXX - need some work on the corner cases here.  If the region does not
    // fit, then we must either make sure any partial_obj from the region
    // fits, or "undo" the initial part of the partial_obj that is in the
    // previous region.
    if (dest_addr + words >= target_end) {
      // Let the caller know where to continue.
      *target_next = dest_addr;
      *source_next = region_to_addr(cur_region);
      return false;
    }
#endif  // #if 1

    _region_data[cur_region].set_destination(dest_addr);

    // Set the destination_count for cur_region, and if necessary, update
    // source_region for a destination region.  The source_region field is
    // updated if cur_region is the first (left-most) region to be copied to a
    // destination region.
    //
    // The destination_count calculation is a bit subtle.  A region that has
    // data that compacts into itself does not count itself as a destination.
    // This maintains the invariant that a zero count means the region is
    // available and can be claimed and then filled.
    if (words > 0) {
      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);
#if 0
      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      uint destination_count = cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (region_offset(dest_addr) == 0) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }
#else
      // Initially assume that the destination regions will be different and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // partially into dest_region_1 and partially into itself.
      uint destination_count = cur_region == dest_region_2 ? 1 : 2;
      if (dest_region_1 != dest_region_2) {
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else {
        // Destination regions are the same; adjust destination_count.
        destination_count -= 1;
        if (region_offset(dest_addr) == 0) {
          // Data from cur_region will be copied to the start of the
          // destination region.
          _region_data[dest_region_1].set_source_region(cur_region);
        }
      }
#endif  // #if 0

      _region_data[cur_region].set_destination_count(destination_count);
      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}
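
// A small worked case for destination_count (hypothetical values, RegionSize
// == 512): if a region's 512 live words are copied to a dest_addr at offset
// 256 of some other region, the data straddles two destination regions and
// neither is cur_region, so the count is 2.  If instead the region compacts
// entirely into itself starting at its own base, dest_region_1 ==
// dest_region_2 == cur_region with offset 0, and the count drops to 0,
// leaving the region immediately claimable.
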
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
#ifdef ASSERT
  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
  }
#endif
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");

  // Region covering the object.
  size_t region_index = addr_to_region_idx(addr);
  const RegionData* const region_ptr = region(region_index);
  HeapWord* const region_addr = region_align_down(addr);

  assert(addr < region_addr + RegionSize, "Region does not cover object");
  assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");

  HeapWord* result = region_ptr->destination();

  // If all the data in the region is live, then the new location of the
  // object can be calculated from the destination of the region plus the
  // offset of the object in the region.
  if (region_ptr->data_size() == RegionSize) {
    result += pointer_delta(addr, region_addr);
    return result;
  }

  // The new location of the object is
  //    region destination +
  //    size of the partial object extending onto the region +
  //    sizes of the live objects in the Region that are to the left of addr
  const size_t partial_obj_size = region_ptr->partial_obj_size();
  HeapWord* const search_start = region_addr + partial_obj_size;

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));

  result += partial_obj_size + live_to_left;
  assert(result <= addr, "object cannot move to the right");
  return result;
}
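
// Numeric sketch of the partially-live case (hypothetical values): if the
// object's region has destination D, a partial object spills 64 words onto
// the region, and the live words between the end of that partial object and
// addr total 100, the new location is D + 64 + 100.  Compaction only slides
// objects left, hence the assert that the result never exceeds addr.
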
klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
  klassOop updated_klass;
  if (PSParallelCompact::should_update_klass(old_klass)) {
    updated_klass = (klassOop) calc_new_pointer(old_klass);
  } else {
    updated_klass = old_klass;
  }

  return updated_klass;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
}
#endif  // #ifdef ASSERT

#ifdef NOT_PRODUCT
ParallelCompactData::RegionData* debug_region(size_t region_index) {
  ParallelCompactData& sd = PSParallelCompact::summary_data();
  return sd.region(region_index);
}
#endif

elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);

void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }

void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MemRegion mr = heap->reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    &_is_alive_closure,
    ParallelGCThreads,
    ParallelRefProcEnabled);
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = gc_heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate bit map for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization("Unable to allocate tables for "
      "parallel garbage collection for the requested heap size.");
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = gc_heap();
  PSYoungGen* young_gen = heap->young_gen();
  MutableSpace* perm_space = heap->perm_gen()->object_space();

  _space_info[perm_space_id].set_space(perm_space);
  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());

  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
                  _space_info[perm_space_id].min_dense_prefix());
  }
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}
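
// Note (an inference; normal_distribution() is defined elsewhere):
// _dwl_first_term is the Gaussian normalization constant
// 1 / (sigma * sqrt(2 * pi)), which suggests normal_distribution(x) evaluates
//
//   _dwl_first_term * exp(-0.5 * ((x - _dwl_mean) / _dwl_std_dev)^2)
//
// i.e., the normal density with mean _dwl_mean and std dev _dwl_std_dev;
// _dwl_adjustment caches its value at x == 1.0 for dead_wood_limiter() below.
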
// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
public:
  PreGCValues() { }
  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }

  void fill(ParallelScavengeHeap* heap) {
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
  };

  size_t heap_used() const      { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const   { return _old_gen_used; }
  size_t perm_gen_used() const  { return _perm_gen_used; }

private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _perm_gen_used;
};

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that
  // will be set at the end of GC.  The marking bitmap is cleared to top;
  // nothing should be marked above top.  The summary data is cleared to the
  // larger of top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of minor
  // collections will have swapped the spaces an unknown number of times.
  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
  ParallelScavengeHeap* heap = gc_heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  pre_gc_values->fill(heap);

  ParCompactionManager::reset();
  NOT_PRODUCT(_mark_bitmap.reset_counters());
  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
    heap->perm_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();
}

void PSParallelCompact::post_compact()
{
  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

  // Clear the marking bitmap and summary data and update top() in each space.
  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
    clear_data_covering_space(SpaceId(id));
    _space_info[id].space()->set_top(_space_info[id].new_top());
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = gc_heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();
    MemRegion perm_mr = heap->perm_gen()->reserved();
    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

    if (young_gen_empty) {
      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
    }
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  ref_processor()->enqueue_discovered_references(NULL);

  if (ZapUnusedHeapArea) {
    heap->gen_mangle_unused_area();
  }

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.region_align_up(space->top());
  const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
  const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily
  // part of the dense prefix.
  size_t full_count = 0;
  const RegionData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.region_to_addr(cp);
  const RegionData* full_cp = cp;
  const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* region_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
                    "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
                    sd.region(cp), region_destination,
                    dense_prefix, cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the region that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of regions,
      // so iterate backwards over those sparse regions, looking for the region
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.region(cp) * region_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_region_live_to_right = live_to_right -
          cp->data_size();
        const size_t prev_region_space_to_right = space_to_right + region_size;
        double prev_region_density_to_right =
          double(prev_region_live_to_right) / prev_region_space_to_right;
        if (density_to_right <= prev_region_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.region(cp), density_to_right,
                        prev_region_density_to_right);
        }
        dense_prefix -= region_size;
        live_to_right = prev_region_live_to_right;
        space_to_right = prev_region_space_to_right;
        density_to_right = prev_region_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += region_size;
    ++cp;
  }

  return dense_prefix;
}
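
// For reference, the deadwood_density polynomial above, d^2 * (1 - d)^2,
// peaks at 0.0625 when the density d is 0.5 and falls to 0 as the space
// becomes nearly empty or nearly full, so at most ~6.25% of capacity is
// tolerated as dead wood to the left of the dense prefix.
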
#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t region_idx = summary_data().addr_to_region_idx(addr);
  RegionData* const cp = summary_data().region(region_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, addr, region_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a
// limit, which is then adjusted so the return value is min_percent when the
// density is 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed value
  // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
  // Then add the minimum value, so the minimum is returned when the density is
  // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}
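
// Worked example (matches the std_dev=80 column of the table above): with
// mean 0.5, std_dev 0.80 and min_percent 1, a density of 0.5 gives
// raw_limit = 1 / (0.80 * sqrt(2 * pi)) ~= 0.49867 and _dwl_adjustment =
// normal_distribution(1.0) ~= 0.41020, so the limit is
// 0.49867 - 0.41020 + 0.01 ~= 0.09848, the table's 0.09847664.
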
ParallelCompactData::RegionData*
PSParallelCompact::first_dead_space_region(const RegionData* beg,
                                           const RegionData* end)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  ParallelCompactData& sd = summary_data();
  size_t left = sd.region(beg);
  size_t right = end > beg ? sd.region(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    RegionData* const middle_ptr = sd.region(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.region_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    if (middle > left && dest < addr) {
      right = middle - 1;
    } else if (middle < right && middle_ptr->data_size() == region_size) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.region(left);
}

ParallelCompactData::RegionData*
PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
                                          const RegionData* end,
                                          size_t dead_words)
{
  ParallelCompactData& sd = summary_data();
  size_t left = sd.region(beg);
  size_t right = end > beg ? sd.region(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    RegionData* const middle_ptr = sd.region(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.region_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    const size_t dead_to_left = pointer_delta(addr, dest);
    if (middle > left && dead_to_left > dead_words) {
      right = middle - 1;
    } else if (middle < right && dead_to_left < dead_words) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.region(left);
}
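
// Both binary searches above rely on the same monotonicity: destinations
// never decrease with region index and each region is RegionSize words, so
// the dead space to the left of a region (its address minus its destination)
// is non-decreasing.  "First region with dead space" and "region with about
// dead_words of dead space to the left" can therefore each be located in
// O(log n) probes.
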
// The result is valid during the summary phase, after the initial
// summarization of each space into itself, and before final summarization.
inline double
PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
                                   HeapWord* const bottom,
                                   HeapWord* const top,
                                   HeapWord* const new_top)
{
  ParallelCompactData& sd = summary_data();

  assert(cp != NULL, "sanity");
  assert(bottom != NULL, "sanity");
  assert(top != NULL, "sanity");
  assert(new_top != NULL, "sanity");
  assert(top >= new_top, "summary data problem?");
  assert(new_top > bottom, "space is empty; should not be here");
  assert(new_top >= cp->destination(), "sanity");
  assert(top >= sd.region_to_addr(cp), "sanity");

  HeapWord* const destination = cp->destination();
  const size_t dense_prefix_live = pointer_delta(destination, bottom);
  const size_t compacted_region_live = pointer_delta(new_top, destination);
  const size_t compacted_region_used = pointer_delta(top,
                                                     sd.region_to_addr(cp));
  const size_t reclaimable = compacted_region_used - compacted_region_live;

  const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
  return double(reclaimable) / divisor;
}
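
// In effect this is reclaimable space divided by an estimate of the cost to
// reclaim it: live words in the compacted region are weighted 1.25x relative
// to dense-prefix live words, presumably because they must actually be
// copied, so a boundary that reclaims the same space with less copying
// scores higher.
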
// Return the address of the end of the dense prefix, a.k.a. the start of the
// compacted region.  The address is always on a region boundary.
//
// Completely full regions at the left are skipped, since no compaction can
// occur in those regions.  Then the maximum amount of dead wood to allow is
// computed, based on the density (amount live / capacity) of the generation;
// the region with approximately that amount of dead space to the left is
// identified as the limit region.  Regions between the last completely full
// region and the limit region are scanned and the one that has the best
// (maximum) reclaimed_ratio() is selected.
HeapWord*
PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top = space->top();
  HeapWord* const top_aligned_up = sd.region_align_up(top);
  HeapWord* const new_top = _space_info[id].new_top();
  HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
  HeapWord* const bottom = space->bottom();
  const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
  const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
  const RegionData* const new_top_cp =
    sd.addr_to_region_ptr(new_top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily
  // part of the dense prefix.
  const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
  assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
         space->is_empty(), "no dead space allowed to the left");
  assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
         "region must have dead space");

  // The gc number is saved whenever a maximum compaction is done, and used to
  // determine when the maximum compaction interval has expired.  This avoids
  // successive max compactions for different reasons.
  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
    total_invocations() == HeapFirstMaximumCompactionCount;
  if (maximum_compaction || full_cp == top_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(full_cp);
  }

  const size_t space_live = pointer_delta(new_top, bottom);
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double density = double(space_live) / double(space_capacity);
  const size_t min_percent_free =
    id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
  const double limiter = dead_wood_limiter(density, min_percent_free);
  const size_t dead_wood_max = space_used - space_live;
  const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
                                      dead_wood_max);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
    tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
                  "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
                  density, min_percent_free, limiter,
                  dead_wood_max, dead_wood_limit);
  }

  // Locate the region with the desired amount of dead space to the left.
  const RegionData* const limit_cp =
    dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);

  // Scan from the first region with dead space to the limit region and find
  // the one with the best (largest) reclaimed ratio.
  double best_ratio = 0.0;
  const RegionData* best_cp = full_cp;
  for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
    double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
    if (tmp_ratio > best_ratio) {
      best_cp = cp;
      best_ratio = tmp_ratio;
    }
  }

#if 0
  // Something to consider: if the region with the best ratio is 'close to'
  // the first region w/free space, choose the first region with free space
  // ("first-free").  The first-free region is usually near the start of the
  // heap, which means we are copying most of the heap already, so copy a bit
  // more to get complete compaction.
  if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
    _maximum_compaction_gc_num = total_invocations();
    best_cp = full_cp;
  }
#endif  // #if 0

  return sd.region_to_addr(best_cp);
}

void PSParallelCompact::summarize_spaces_quick()
{
  for (unsigned int i = 0; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
    bool result = _summary_data.summarize(space->bottom(), space->end(),
                                          space->bottom(), space->top(),
                                          _space_info[i].new_top_addr());
    assert(result, "should never fail");
    _space_info[i].set_dense_prefix(space->bottom());
  }
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
{
  HeapWord* const dense_prefix_end = dense_prefix(id);
  const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
  const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
  if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
    // Only enough dead space is filled so that any remaining dead space to the
    // left is larger than the minimum filler object.  (The remainder is filled
    // during the copy/update phase.)
    //
    // The size of the dead space to the right of the boundary is not a
    // concern, since compaction will be able to use whatever space is
    // available.
    //
    // Here '||' is the boundary, 'x' represents a don't care bit and a box
    // surrounds the space to be filled with an object.
    //
    // In the 32-bit VM, each bit represents two 32-bit words:
    //                          +---+
    // a) beg_bits: ... x  x  x | 0 | ||  0  x  x ...
    //    end_bits: ... x  x  x | 0 | ||  0  x  x ...
    //                          +---+
    //
    // In the 64-bit VM, each bit represents one 64-bit word:
    //                          +----------+
    // b) beg_bits: ... x  x  x | 0  ||  0 | x  x ...
    //    end_bits: ... x  x  1 | 0  ||  0 | x  x ...
    //                          +----------+
    //                      +-------+
    // c) beg_bits: ... x  x | 0  0 | ||  0  x  x ...
    //    end_bits: ... x  1 | 0  0 | ||  0  x  x ...
    //                      +-------+
    //                   +----------+
    // d) beg_bits: ... x | 0  0  0 | ||  0  x  x ...
    //    end_bits: ... 1 | 0  0  0 | ||  0  x  x ...
    //                   +----------+
    //                      +-------+
    // e) beg_bits: ... 0  0 | 0  0 | ||  0  x  x ...
    //    end_bits: ... 0  0 | 0  0 | ||  0  x  x ...
    //                      +-------+

    // Initially assume case a, c or e will apply.
    size_t obj_len = (size_t)oopDesc::header_size();
    HeapWord* obj_beg = dense_prefix_end - obj_len;

#ifdef _LP64
    if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
      // Case b above.
      obj_beg = dense_prefix_end - 1;
    } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
               _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
      // Case d above.
      obj_beg = dense_prefix_end - 3;
      obj_len = 3;
    }
#endif  // #ifdef _LP64

    MemRegion region(obj_beg, obj_len);
    SharedHeap::fill_region_with_object(region);
    _mark_bitmap.mark_obj(obj_beg, obj_len);
    _summary_data.add_obj(obj_beg, obj_len);
    assert(start_array(id) != NULL, "sanity");
    start_array(id)->allocate_block(obj_beg);
  }
}

void
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{
  assert(id < last_space_id, "id out of range");
  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
         "should have been set in summarize_spaces_quick()");

  const MutableSpace* space = _space_info[id].space();
  if (_space_info[id].new_top() != space->bottom()) {
    HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
    _space_info[id].set_dense_prefix(dense_prefix_end);

#ifndef PRODUCT
    if (TraceParallelOldGCDensePrefix) {
      print_dense_prefix_stats("ratio", id, maximum_compaction,
                               dense_prefix_end);
      HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
      print_dense_prefix_stats("density", id, maximum_compaction, addr);
    }
#endif  // #ifndef PRODUCT

    // If dead space crosses the dense prefix boundary, it is (at least
    // partially) filled with a dummy object, marked live and added to the
    // summary data.  This simplifies the copy/update phase and must be done
    // before the final locations of objects are determined, to prevent leaving
    // a fragment of dead space that is too small to fill with an object.
    if (!maximum_compaction && dense_prefix_end != space->bottom()) {
      fill_dense_prefix_end(id);
    }

    // Compute the destination of each Region, and thus each object.
    _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
    _summary_data.summarize(dense_prefix_end, space->end(),
                            dense_prefix_end, space->top(),
                            _space_info[id].new_top_addr());
  }

  if (TraceParallelOldGCSummaryPhase) {
    const size_t region_size = ParallelCompactData::RegionSize;
    HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
    const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
    const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
    HeapWord* const new_top = _space_info[id].new_top();
    const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
    const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
    tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
                  "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
                  "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
                  id, space->capacity_in_words(), dense_prefix_end,
                  dp_region, dp_words / region_size,
                  cr_words / region_size, new_top);
  }
}

void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                      bool maximum_compaction)
{
  EventMark m("2 summarize");
  TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
  // trace("2");

#ifdef ASSERT
  if (TraceParallelOldGCMarkingPhase) {
    tty->print_cr("add_obj_count=" SIZE_FORMAT " "
                  "add_obj_bytes=" SIZE_FORMAT,
                  add_obj_count, add_obj_size * HeapWordSize);
    tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
                  "mark_bitmap_bytes=" SIZE_FORMAT,
                  mark_bitmap_count, mark_bitmap_size * HeapWordSize);
  }
#endif  // #ifdef ASSERT

  // Quick summarization of each space into itself, to see how much is live.
  summarize_spaces_quick();

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("summary_phase: after summarizing each space to self");
    Universe::print();
    NOT_PRODUCT(print_region_ranges());
    if (Verbose) {
      NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
    }
  }

  // The amount of live data that will end up in old space (assuming it fits).
  size_t old_space_total_live = 0;
  unsigned int id;
  for (id = old_space_id; id < last_space_id; ++id) {
    old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                          _space_info[id].space()->bottom());
  }

  const MutableSpace* old_space = _space_info[old_space_id].space();
  if (old_space_total_live > old_space->capacity_in_words()) {
    // XXX - should also try to expand
    maximum_compaction = true;
  } else if (!UseParallelOldGCDensePrefix) {
    maximum_compaction = true;
  }

  // Permanent and Old generations.
  summarize_space(perm_space_id, maximum_compaction);
  summarize_space(old_space_id, maximum_compaction);

  // Summarize the remaining spaces (those in the young gen) into old space.
  // If the live data from a space doesn't fit, the existing summarization is
  // left intact, so the data is compacted down within the space itself.
  HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
  HeapWord* const target_space_end = old_space->end();
  for (id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(target_space_end, *new_top_addr);
    if (live > 0 && live <= available) {
      // All the live data will fit.
      if (TraceParallelOldGCSummaryPhase) {
        tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
                      id, *new_top_addr);
      }
      _summary_data.summarize(*new_top_addr, target_space_end,
                              space->bottom(), space->top(),
                              new_top_addr);

      // Clear the source_region field for each region in the space.
      HeapWord* const new_top = _space_info[id].new_top();
      HeapWord* const clear_end = _summary_data.region_align_up(new_top);
      RegionData* beg_region =
        _summary_data.addr_to_region_ptr(space->bottom());
      RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
      while (beg_region < end_region) {
        beg_region->set_source_region(0);
        ++beg_region;
      }

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    }
  }

  if (TraceParallelOldGCSummaryPhase) {
    tty->print_cr("summary_phase: after final summarization");
    Universe::print();
    NOT_PRODUCT(print_region_ranges());
    if (Verbose) {
      NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
    }
  }
}

1470 // This method should contain all heap-specific policy for invoking a full
1471 // collection. invoke_no_policy() will only attempt to compact the heap; it
1472 // will do nothing further. If we need to bail out for policy reasons, scavenge
1473 // before full gc, or any other specialized behavior, it needs to be added here.
1474 //
1475 // Note that this method should only be called from the vm_thread while at a
1476 // safepoint.
1477 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1478 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1479 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1480 "should be in vm thread");
1481 ParallelScavengeHeap* heap = gc_heap();
1482 GCCause::Cause gc_cause = heap->gc_cause();
1483 assert(!heap->is_gc_active(), "not reentrant");
1485 PSAdaptiveSizePolicy* policy = heap->size_policy();
1487 // Before each allocation/collection attempt, find out from the
1488 // policy object if GCs are, on the whole, taking too long. If so,
1489 // bail out without attempting a collection. The exceptions are
1490 // for explicitly requested GCs.
1491 if (!policy->gc_time_limit_exceeded() ||
1492 GCCause::is_user_requested_gc(gc_cause) ||
1493 GCCause::is_serviceability_requested_gc(gc_cause)) {
1494 IsGCActiveMark mark;
1496 if (ScavengeBeforeFullGC) {
1497 PSScavenge::invoke_no_policy();
1498 }
1500 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
1501 }
1502 }
1504 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1505 size_t addr_region_index = addr_to_region_idx(addr);
1506 return region_index == addr_region_index;
1507 }
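// region_contains() reduces to shift arithmetic on the region size;
// conceptually (a sketch of the computation, assuming the mapping used by
// ParallelCompactData rather than quoting its actual accessor):
//
//   size_t addr_to_region_idx(HeapWord* addr) {
//     return pointer_delta(addr, _region_start) >> Log2RegionSize;
//   }
//
// With the default Log2RegionSize of 9, addresses 512 heap words apart fall
// into adjacent regions.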
1509 // This method contains no policy. You should probably
1510 // be calling invoke() instead.
1511 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1512 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1513 assert(ref_processor() != NULL, "Sanity");
1515 if (GC_locker::check_active_before_gc()) {
1516 return;
1517 }
1519 TimeStamp marking_start;
1520 TimeStamp compaction_start;
1521 TimeStamp collection_exit;
1523 ParallelScavengeHeap* heap = gc_heap();
1524 GCCause::Cause gc_cause = heap->gc_cause();
1525 PSYoungGen* young_gen = heap->young_gen();
1526 PSOldGen* old_gen = heap->old_gen();
1527 PSPermGen* perm_gen = heap->perm_gen();
1528 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1530 if (ZapUnusedHeapArea) {
1531 // Save information needed to minimize mangling
1532 heap->record_gen_tops_before_GC();
1533 }
1535 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
1537 // Make sure data structures are sane, make the heap parsable, and do other
1538 // miscellaneous bookkeeping.
1539 PreGCValues pre_gc_values;
1540 pre_compact(&pre_gc_values);
1542 // Get the compaction manager reserved for the VM thread.
1543 ParCompactionManager* const vmthread_cm =
1544 ParCompactionManager::manager_array(gc_task_manager()->workers());
1546 // Placed after pre_compact(), where the number of invocations is incremented.
1547 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
1549 {
1550 ResourceMark rm;
1551 HandleMark hm;
1553 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
1555 // This is useful for debugging but don't change the output the
1556 // customer sees.
1557 const char* gc_cause_str = "Full GC";
1558 if (is_system_gc && PrintGCDetails) {
1559 gc_cause_str = "Full GC (System)";
1560 }
1561 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
1562 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
1563 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
1564 TraceCollectorStats tcs(counters());
1565 TraceMemoryManagerStats tms(true /* Full GC */);
1567 if (TraceGen1Time) accumulated_time()->start();
1569 // Let the size policy know we're starting
1570 size_policy->major_collection_begin();
1572 // When collecting the permanent generation methodOops may be moving,
1573 // so we either have to flush all bcp data or convert it into bci.
1574 CodeCache::gc_prologue();
1575 Threads::gc_prologue();
1577 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1578 COMPILER2_PRESENT(DerivedPointerTable::clear());
1580 ref_processor()->enable_discovery();
1582 bool marked_for_unloading = false;
1584 marking_start.update();
1585 marking_phase(vmthread_cm, maximum_heap_compaction);
1587 #ifndef PRODUCT
1588 if (TraceParallelOldGCMarkingPhase) {
1589 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
1590 "cas_by_another %d",
1591 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
1592 mark_bitmap()->cas_by_another());
1593 }
1594 #endif // #ifndef PRODUCT
1596 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
1597 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1599 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
1600 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
1602 // adjust_roots() updates Universe::_intArrayKlassObj which is
1603 // needed by the compaction for filling holes in the dense prefix.
1604 adjust_roots();
1606 compaction_start.update();
1607 // Does the perm gen always have to be done serially because
1608 // klasses are used in the update of an object?
1609 compact_perm(vmthread_cm);
1611 if (UseParallelOldGCCompacting) {
1612 compact();
1613 } else {
1614 compact_serial(vmthread_cm);
1615 }
1617 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1618 // done before resizing.
1619 post_compact();
1621 // Let the size policy know we're done
1622 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1624 if (UseAdaptiveSizePolicy) {
1625 if (PrintAdaptiveSizePolicy) {
1626 gclog_or_tty->print("AdaptiveSizeStart: ");
1627 gclog_or_tty->stamp();
1628 gclog_or_tty->print_cr(" collection: %d ",
1629 heap->total_collections());
1630 if (Verbose) {
1631 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
1632 " perm_gen_capacity: %d ",
1633 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
1634 perm_gen->capacity_in_bytes());
1635 }
1636 }
1638 // Don't check if the size_policy is ready here. Let
1639 // the size_policy check that internally.
1640 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1641 ((gc_cause != GCCause::_java_lang_system_gc) ||
1642 UseAdaptiveSizePolicyWithSystemGC)) {
1643 // Calculate optimal free space amounts
1644 assert(young_gen->max_size() >
1645 young_gen->from_space()->capacity_in_bytes() +
1646 young_gen->to_space()->capacity_in_bytes(),
1647 "Sizes of space in young gen are out-of-bounds");
1648 size_t max_eden_size = young_gen->max_size() -
1649 young_gen->from_space()->capacity_in_bytes() -
1650 young_gen->to_space()->capacity_in_bytes();
1651 size_policy->compute_generation_free_space(
1652 young_gen->used_in_bytes(),
1653 young_gen->eden_space()->used_in_bytes(),
1654 old_gen->used_in_bytes(),
1655 perm_gen->used_in_bytes(),
1656 young_gen->eden_space()->capacity_in_bytes(),
1657 old_gen->max_gen_size(),
1658 max_eden_size,
1659 true /* full gc*/,
1660 gc_cause);
1662 heap->resize_old_gen(
1663 size_policy->calculated_old_free_size_in_bytes());
1665 // Don't resize the young generation at a major collection. A
1666 // desired young generation size may have been calculated but
1667 // resizing the young generation complicates the code because the
1668 // resizing of the old generation may have moved the boundary
1669 // between the young generation and the old generation. Let the
1670 // young generation resizing happen at the minor collections.
1671 }
1672 if (PrintAdaptiveSizePolicy) {
1673 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
1674 heap->total_collections());
1675 }
1676 }
1678 if (UsePerfData) {
1679 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1680 counters->update_counters();
1681 counters->update_old_capacity(old_gen->capacity_in_bytes());
1682 counters->update_young_capacity(young_gen->capacity_in_bytes());
1683 }
1685 heap->resize_all_tlabs();
1687 // We collected the perm gen, so we'll resize it here.
1688 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
1690 if (TraceGen1Time) accumulated_time()->stop();
1692 if (PrintGC) {
1693 if (PrintGCDetails) {
1694 // No GC timestamp here. This is after GC so it would be confusing.
1695 young_gen->print_used_change(pre_gc_values.young_gen_used());
1696 old_gen->print_used_change(pre_gc_values.old_gen_used());
1697 heap->print_heap_change(pre_gc_values.heap_used());
1698 // Print perm gen last (print_heap_change() excludes the perm gen).
1699 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
1700 } else {
1701 heap->print_heap_change(pre_gc_values.heap_used());
1702 }
1703 }
1705 // Track memory usage and detect low memory
1706 MemoryService::track_memory_usage();
1707 heap->update_counters();
1709 if (PrintGCDetails) {
1710 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
1711 if (size_policy->gc_time_limit_exceeded()) {
1712 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
1713 "of %d%%", GCTimeLimit);
1714 } else {
1715 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
1716 "of %d%%", GCTimeLimit);
1717 }
1718 }
1719 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
1720 }
1721 }
1723 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1724 HandleMark hm; // Discard invalid handles created during verification
1725 gclog_or_tty->print(" VerifyAfterGC:");
1726 Universe::verify(false);
1727 }
1729 // Re-verify object start arrays
1730 if (VerifyObjectStartArray &&
1731 VerifyAfterGC) {
1732 old_gen->verify_object_start_array();
1733 perm_gen->verify_object_start_array();
1734 }
1736 if (ZapUnusedHeapArea) {
1737 old_gen->object_space()->check_mangled_unused_area_complete();
1738 perm_gen->object_space()->check_mangled_unused_area_complete();
1739 }
1741 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1743 collection_exit.update();
1745 if (PrintHeapAtGC) {
1746 Universe::print_heap_after_gc();
1747 }
1748 if (PrintGCTaskTimeStamps) {
1749 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
1750 INT64_FORMAT,
1751 marking_start.ticks(), compaction_start.ticks(),
1752 collection_exit.ticks());
1753 gc_task_manager()->print_task_time_stamps();
1754 }
1755 }
1757 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1758 PSYoungGen* young_gen,
1759 PSOldGen* old_gen) {
1760 MutableSpace* const eden_space = young_gen->eden_space();
1761 assert(!eden_space->is_empty(), "eden must be non-empty");
1762 assert(young_gen->virtual_space()->alignment() ==
1763 old_gen->virtual_space()->alignment(), "alignments do not match");
1765 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1766 return false;
1767 }
1769 // Both generations must be completely committed.
1770 if (young_gen->virtual_space()->uncommitted_size() != 0) {
1771 return false;
1772 }
1773 if (old_gen->virtual_space()->uncommitted_size() != 0) {
1774 return false;
1775 }
1777 // Figure out how much to take from eden. Include the average amount promoted
1778 // in the total; otherwise the next young gen GC will simply bail out to a
1779 // full GC.
1780 const size_t alignment = old_gen->virtual_space()->alignment();
1781 const size_t eden_used = eden_space->used_in_bytes();
1782 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
1783 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1784 const size_t eden_capacity = eden_space->capacity_in_bytes();
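// Illustrative numbers for the computation above: with eden_used = 30M, an
// average padded promotion of 5M and a 64K alignment, absorb_size is
// align_size_up(35M, 64K) = 35M. The absorb only proceeds if that still
// leaves part of eden's capacity unabsorbed (checked next).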
1786 if (absorb_size >= eden_capacity) {
1787 return false; // Must leave some space in eden.
1788 }
1790 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1791 if (new_young_size < young_gen->min_gen_size()) {
1792 return false; // Respect young gen minimum size.
1793 }
1795 if (TraceAdaptiveGCBoundary && Verbose) {
1796 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
1797 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
1798 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
1799 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
1800 absorb_size / K,
1801 eden_capacity / K, (eden_capacity - absorb_size) / K,
1802 young_gen->from_space()->used_in_bytes() / K,
1803 young_gen->to_space()->used_in_bytes() / K,
1804 young_gen->capacity_in_bytes() / K, new_young_size / K);
1805 }
1807 // Fill the unused part of the old gen.
1808 MutableSpace* const old_space = old_gen->object_space();
1809 MemRegion old_gen_unused(old_space->top(), old_space->end());
1810 if (!old_gen_unused.is_empty()) {
1811 SharedHeap::fill_region_with_object(old_gen_unused);
1812 }
1814 // Take the live data from eden and set both top and end in the old gen to
1815 // eden top. (Need to set end because reset_after_change() mangles the region
1816 // from end to virtual_space->high() in debug builds).
1817 HeapWord* const new_top = eden_space->top();
1818 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
1819 absorb_size);
1820 young_gen->reset_after_change();
1821 old_space->set_top(new_top);
1822 old_space->set_end(new_top);
1823 old_gen->reset_after_change();
1825 // Update the object start array for the filler object and the data from eden.
1826 ObjectStartArray* const start_array = old_gen->start_array();
1827 HeapWord* const start = old_gen_unused.start();
1828 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
1829 start_array->allocate_block(addr);
1830 }
1832 // Could update the promoted average here, but it is not typically updated at
1833 // full GCs and the value to use is unclear. Something like
1834 //
1835 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
1837 size_policy->set_bytes_absorbed_from_eden(absorb_size);
1838 return true;
1839 }
1841 GCTaskManager* const PSParallelCompact::gc_task_manager() {
1842 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
1843 "shouldn't return NULL");
1844 return ParallelScavengeHeap::gc_task_manager();
1845 }
1847 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
1848 bool maximum_heap_compaction) {
1849 // Recursively traverse all live objects and mark them
1850 EventMark m("1 mark object");
1851 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
1853 ParallelScavengeHeap* heap = gc_heap();
1854 uint parallel_gc_threads = heap->gc_task_manager()->workers();
1855 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
1856 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
1858 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
1859 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
1861 {
1862 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
1864 GCTaskQueue* q = GCTaskQueue::create();
1866 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
1867 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
1868 // We scan the thread roots in parallel
1869 Threads::create_thread_roots_marking_tasks(q);
1870 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
1871 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
1872 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
1873 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
1874 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
1875 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
1877 if (parallel_gc_threads > 1) {
1878 for (uint j = 0; j < parallel_gc_threads; j++) {
1879 q->enqueue(new StealMarkingTask(&terminator));
1880 }
1881 }
1883 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
1884 q->enqueue(fin);
1886 gc_task_manager()->add_list(q);
1888 fin->wait_for();
1890 // We have to release the barrier tasks!
1891 WaitForBarrierGCTask::destroy(fin);
1892 }
1894 // Process reference objects found during marking
1895 {
1896 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
1897 ReferencePolicy *soft_ref_policy;
1898 if (maximum_heap_compaction) {
1899 soft_ref_policy = new AlwaysClearPolicy();
1900 } else {
1901 #ifdef COMPILER2
1902 soft_ref_policy = new LRUMaxHeapPolicy();
1903 #else
1904 soft_ref_policy = new LRUCurrentHeapPolicy();
1905 #endif // COMPILER2
1906 }
1907 assert(soft_ref_policy != NULL, "No soft reference policy");
1908 if (ref_processor()->processing_is_mt()) {
1909 RefProcTaskExecutor task_executor;
1910 ref_processor()->process_discovered_references(
1911 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
1912 &follow_stack_closure, &task_executor);
1913 } else {
1914 ref_processor()->process_discovered_references(
1915 soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
1916 &follow_stack_closure, NULL);
1917 }
1918 }
1920 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
1921 // Follow system dictionary roots and unload classes.
1922 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
1924 // Follow code cache roots.
1925 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
1926 purged_class);
1927 follow_stack(cm); // Flush marking stack.
1929 // Update subklass/sibling/implementor links of live klasses
1930 // revisit_klass_stack is used in follow_weak_klass_links().
1931 follow_weak_klass_links(cm);
1933 // Visit symbol and interned string tables and delete unmarked oops
1934 SymbolTable::unlink(is_alive_closure());
1935 StringTable::unlink(is_alive_closure());
1937 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
1938 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
1939 }
1941 // This should be moved to the shared markSweep code!
1942 class PSAlwaysTrueClosure: public BoolObjectClosure {
1943 public:
1944 void do_object(oop p) { ShouldNotReachHere(); }
1945 bool do_object_b(oop p) { return true; }
1946 };
1947 static PSAlwaysTrueClosure always_true;
1949 void PSParallelCompact::adjust_roots() {
1950 // Adjust the pointers to reflect the new locations
1951 EventMark m("3 adjust roots");
1952 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
1954 // General strong roots.
1955 Universe::oops_do(adjust_root_pointer_closure());
1956 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
1957 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
1958 Threads::oops_do(adjust_root_pointer_closure());
1959 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
1960 FlatProfiler::oops_do(adjust_root_pointer_closure());
1961 Management::oops_do(adjust_root_pointer_closure());
1962 JvmtiExport::oops_do(adjust_root_pointer_closure());
1963 // SO_AllClasses
1964 SystemDictionary::oops_do(adjust_root_pointer_closure());
1965 vmSymbols::oops_do(adjust_root_pointer_closure());
1967 // Now adjust pointers in remaining weak roots. (All of which should
1968 // have been cleared if they pointed to non-surviving objects.)
1969 // Global (weak) JNI handles
1970 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
1972 CodeCache::oops_do(adjust_pointer_closure());
1973 SymbolTable::oops_do(adjust_root_pointer_closure());
1974 StringTable::oops_do(adjust_root_pointer_closure());
1975 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
1976 // Roots were visited so references into the young gen in roots
1977 // may have been scanned. Process them also.
1978 // Should the reference processor have a span that excludes
1979 // young gen objects?
1980 PSScavenge::reference_processor()->weak_oops_do(
1981 adjust_root_pointer_closure());
1982 }
1984 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
1985 EventMark m("4 compact perm");
1986 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
1987 // trace("4");
1989 gc_heap()->perm_gen()->start_array()->reset();
1990 move_and_update(cm, perm_space_id);
1991 }
1993 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
1994 uint parallel_gc_threads)
1995 {
1996 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
1998 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
1999 for (unsigned int j = 0; j < task_count; j++) {
2000 q->enqueue(new DrainStacksCompactionTask());
2001 }
2003 // Find all regions that are available (can be filled immediately) and
2004 // distribute them to the thread stacks. The iteration is done in reverse
2005 // order (high to low) so the regions will be removed in ascending order.
2007 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2009 size_t fillable_regions = 0; // A count for diagnostic purposes.
2010 unsigned int which = 0; // The worker thread number.
2012 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2013 SpaceInfo* const space_info = _space_info + id;
2014 MutableSpace* const space = space_info->space();
2015 HeapWord* const new_top = space_info->new_top();
2017 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2018 const size_t end_region =
2019 sd.addr_to_region_idx(sd.region_align_up(new_top));
2020 assert(end_region > 0, "perm gen cannot be empty");
2022 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
2023 if (sd.region(cur)->claim_unsafe()) {
2024 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
2025 cm->save_for_processing(cur);
2027 if (TraceParallelOldGCCompactionPhase && Verbose) {
2028 const size_t count_mod_8 = fillable_regions & 7;
2029 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2030 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2031 if (count_mod_8 == 7) gclog_or_tty->cr();
2032 }
2034 NOT_PRODUCT(++fillable_regions;)
2036 // Assign regions to threads in round-robin fashion.
2037 if (++which == task_count) {
2038 which = 0;
2039 }
2040 }
2041 }
2042 }
2044 if (TraceParallelOldGCCompactionPhase) {
2045 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2046 gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
2047 }
2048 }
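// A sketch of why the reverse iteration above yields ascending processing:
// the per-thread stacks are LIFO, so if regions 9, 7 and 5 are pushed in
// that (descending) order, the draining thread pops them as 5, 7, 9, i.e.,
// each thread fills its regions in ascending address order.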
2050 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2052 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2053 uint parallel_gc_threads) {
2054 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2056 ParallelCompactData& sd = PSParallelCompact::summary_data();
2058 // Iterate over all the spaces adding tasks for updating
2059 // regions in the dense prefix. Assume that 1 gc thread
2060 // will work on opening the gaps and the remaining gc threads
2061 // will work on the dense prefix.
2062 SpaceId space_id = old_space_id;
2063 while (space_id != last_space_id) {
2064 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2065 const MutableSpace* const space = _space_info[space_id].space();
2067 if (dense_prefix_end == space->bottom()) {
2068 // There is no dense prefix for this space.
2069 space_id = next_compaction_space_id(space_id);
2070 continue;
2071 }
2073 // The dense prefix is before this region.
2074 size_t region_index_end_dense_prefix =
2075 sd.addr_to_region_idx(dense_prefix_end);
2076 RegionData* const dense_prefix_cp =
2077 sd.region(region_index_end_dense_prefix);
2078 assert(dense_prefix_end == space->end() ||
2079 dense_prefix_cp->available() ||
2080 dense_prefix_cp->claimed(),
2081 "The region after the dense prefix should always be ready to fill");
2083 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2085 // Is there dense prefix work?
2086 size_t total_dense_prefix_regions =
2087 region_index_end_dense_prefix - region_index_start;
2088 // How many regions of the dense prefix should be given to
2089 // each thread?
2090 if (total_dense_prefix_regions > 0) {
2091 uint tasks_for_dense_prefix = 1;
2092 if (UseParallelDensePrefixUpdate) {
2093 if (total_dense_prefix_regions <=
2094 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2095 // Don't over partition. This assumes that
2096 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2097 // so there are not many regions to process.
2098 tasks_for_dense_prefix = parallel_gc_threads;
2099 } else {
2100 // Over partition
2101 tasks_for_dense_prefix = parallel_gc_threads *
2102 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2103 }
2104 }
2105 size_t regions_per_thread = total_dense_prefix_regions /
2106 tasks_for_dense_prefix;
2107 // Give each thread at least 1 region.
2108 if (regions_per_thread == 0) {
2109 regions_per_thread = 1;
2110 }
2112 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2113 if (region_index_start >= region_index_end_dense_prefix) {
2114 break;
2115 }
2116 // region_index_end is not processed
2117 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2118 region_index_end_dense_prefix);
2119 q->enqueue(new UpdateDensePrefixTask(
2120 space_id,
2121 region_index_start,
2122 region_index_end));
2123 region_index_start = region_index_end;
2124 }
2125 }
2126 // This gets any part of the dense prefix that did not
2127 // fit evenly.
2128 if (region_index_start < region_index_end_dense_prefix) {
2129 q->enqueue(new UpdateDensePrefixTask(
2130 space_id,
2131 region_index_start,
2132 region_index_end_dense_prefix));
2133 }
2134 space_id = next_compaction_space_id(space_id);
2135 } // End tasks for dense prefix
2136 }
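// A worked example of the partitioning above (illustrative numbers): with 4
// gc threads and PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING = 4, a dense prefix
// of 100 regions exceeds 4 * 4 = 16, so tasks_for_dense_prefix = 16 and
// regions_per_thread = 100 / 16 = 6. The loop enqueues 16 tasks covering
// 96 regions and the trailing check enqueues one more task for the
// remaining 4.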
2138 void PSParallelCompact::enqueue_region_stealing_tasks(
2139 GCTaskQueue* q,
2140 ParallelTaskTerminator* terminator_ptr,
2141 uint parallel_gc_threads) {
2142 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2144 // Once a thread has drained its stack, it should try to steal regions from
2145 // other threads.
2146 if (parallel_gc_threads > 1) {
2147 for (uint j = 0; j < parallel_gc_threads; j++) {
2148 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2149 }
2150 }
2151 }
2153 void PSParallelCompact::compact() {
2154 EventMark m("5 compact");
2155 // trace("5");
2156 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2158 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2159 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2160 PSOldGen* old_gen = heap->old_gen();
2161 old_gen->start_array()->reset();
2162 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2163 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2164 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2166 GCTaskQueue* q = GCTaskQueue::create();
2167 enqueue_region_draining_tasks(q, parallel_gc_threads);
2168 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
2169 enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
2171 {
2172 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2174 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2175 q->enqueue(fin);
2177 gc_task_manager()->add_list(q);
2179 fin->wait_for();
2181 // We have to release the barrier tasks!
2182 WaitForBarrierGCTask::destroy(fin);
2184 #ifdef ASSERT
2185 // Verify that all regions have been processed before the deferred updates.
2186 // Note that perm_space_id is skipped; this type of verification is not
2187 // valid until the perm gen is compacted by regions.
2188 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2189 verify_complete(SpaceId(id));
2190 }
2191 #endif
2192 }
2194 {
2195 // Update the deferred objects, if any. Any compaction manager can be used.
2196 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2197 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2198 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2199 update_deferred_objects(cm, SpaceId(id));
2200 }
2201 }
2202 }
2204 #ifdef ASSERT
2205 void PSParallelCompact::verify_complete(SpaceId space_id) {
2206 // All Regions between space bottom() to new_top() should be marked as filled
2207 // and all Regions between new_top() and top() should be available (i.e.,
2208 // should have been emptied).
2209 ParallelCompactData& sd = summary_data();
2210 SpaceInfo si = _space_info[space_id];
2211 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2212 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2213 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2214 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2215 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2217 bool issued_a_warning = false;
2219 size_t cur_region;
2220 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2221 const RegionData* const c = sd.region(cur_region);
2222 if (!c->completed()) {
2223 warning("region " SIZE_FORMAT " not filled: "
2224 "destination_count=" SIZE_FORMAT,
2225 cur_region, c->destination_count());
2226 issued_a_warning = true;
2227 }
2228 }
2230 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2231 const RegionData* const c = sd.region(cur_region);
2232 if (!c->available()) {
2233 warning("region " SIZE_FORMAT " not empty: "
2234 "destination_count=" SIZE_FORMAT,
2235 cur_region, c->destination_count());
2236 issued_a_warning = true;
2237 }
2238 }
2240 if (issued_a_warning) {
2241 print_region_ranges();
2242 }
2243 }
2244 #endif // #ifdef ASSERT
2246 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
2247 EventMark m("5 compact serial");
2248 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
2250 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2251 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2253 PSYoungGen* young_gen = heap->young_gen();
2254 PSOldGen* old_gen = heap->old_gen();
2256 old_gen->start_array()->reset();
2257 old_gen->move_and_update(cm);
2258 young_gen->move_and_update(cm);
2259 }
2262 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
2263 while (!cm->overflow_stack()->is_empty()) {
2264 oop obj = cm->overflow_stack()->pop();
2265 obj->follow_contents(cm);
2266 }
2268 oop obj;
2269 // obj is a reference!!!
2270 while (cm->marking_stack()->pop_local(obj)) {
2271 // It would be nice to assert about the type of objects we might
2272 // pop, but they can come from anywhere, unfortunately.
2273 obj->follow_contents(cm);
2274 }
2275 }
2277 void
2278 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
2279 // All klasses on the revisit stack are marked at this point.
2280 // Update and follow all subklass, sibling and implementor links.
2281 for (uint i = 0; i < ParallelGCThreads+1; i++) {
2282 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2283 KeepAliveClosure keep_alive_closure(cm);
2284 for (int j = 0; j < cm->revisit_klass_stack()->length(); j++) {
2285 cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
2286 is_alive_closure(),
2287 &keep_alive_closure);
2288 }
2289 follow_stack(cm);
2290 }
2291 }
2293 void
2294 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2295 cm->revisit_klass_stack()->push(k);
2296 }
2298 #ifdef VALIDATE_MARK_SWEEP
2300 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
2301 if (!ValidateMarkSweep)
2302 return;
2304 if (!isroot) {
2305 if (_pointer_tracking) {
2306 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2307 _adjusted_pointers->remove(p);
2308 }
2309 } else {
2310 ptrdiff_t index = _root_refs_stack->find(p);
2311 if (index != -1) {
2312 int l = _root_refs_stack->length();
2313 if (l > 0 && l - 1 != index) {
2314 void* last = _root_refs_stack->pop();
2315 assert(last != p, "should be different");
2316 _root_refs_stack->at_put(index, last);
2317 } else {
2318 _root_refs_stack->remove(p);
2319 }
2320 }
2321 }
2322 }
2325 void PSParallelCompact::check_adjust_pointer(void* p) {
2326 _adjusted_pointers->push(p);
2327 }
2330 class AdjusterTracker: public OopClosure {
2331 public:
2332 AdjusterTracker() {}
2333 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
2334 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
2335 };
2338 void PSParallelCompact::track_interior_pointers(oop obj) {
2339 if (ValidateMarkSweep) {
2340 _adjusted_pointers->clear();
2341 _pointer_tracking = true;
2343 AdjusterTracker checker;
2344 obj->oop_iterate(&checker);
2345 }
2346 }
2349 void PSParallelCompact::check_interior_pointers() {
2350 if (ValidateMarkSweep) {
2351 _pointer_tracking = false;
2352 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2353 }
2354 }
2357 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2358 if (ValidateMarkSweep) {
2359 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2360 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2361 }
2362 }
2365 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2366 if (ValidateMarkSweep) {
2367 _live_oops->push(p);
2368 _live_oops_size->push(size);
2369 _live_oops_index++;
2370 }
2371 }
2373 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2374 if (ValidateMarkSweep) {
2375 oop obj = _live_oops->at((int)_live_oops_index);
2376 guarantee(obj == p, "should be the same object");
2377 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2378 _live_oops_index++;
2379 }
2380 }
2382 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2383 HeapWord* compaction_top) {
2384 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2385 "should be moved to forwarded location");
2386 if (ValidateMarkSweep) {
2387 PSParallelCompact::validate_live_oop(oop(q), size);
2388 _live_oops_moved_to->push(oop(compaction_top));
2389 }
2390 if (RecordMarkSweepCompaction) {
2391 _cur_gc_live_oops->push(q);
2392 _cur_gc_live_oops_moved_to->push(compaction_top);
2393 _cur_gc_live_oops_size->push(size);
2394 }
2395 }
2398 void PSParallelCompact::compaction_complete() {
2399 if (RecordMarkSweepCompaction) {
2400 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
2401 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2402 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
2404 _cur_gc_live_oops = _last_gc_live_oops;
2405 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
2406 _cur_gc_live_oops_size = _last_gc_live_oops_size;
2407 _last_gc_live_oops = _tmp_live_oops;
2408 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2409 _last_gc_live_oops_size = _tmp_live_oops_size;
2410 }
2411 }
2414 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2415 if (!RecordMarkSweepCompaction) {
2416 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2417 return;
2418 }
2420 if (_last_gc_live_oops == NULL) {
2421 tty->print_cr("No compaction information gathered yet");
2422 return;
2423 }
2425 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2426 HeapWord* old_oop = _last_gc_live_oops->at(i);
2427 size_t sz = _last_gc_live_oops_size->at(i);
2428 if (old_oop <= q && q < (old_oop + sz)) {
2429 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2430 size_t offset = (q - old_oop);
2431 tty->print_cr("Address " PTR_FORMAT, q);
2432 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
2433 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2434 return;
2435 }
2436 }
2438 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2439 }
2440 #endif //VALIDATE_MARK_SWEEP
2442 // Update interior oops in the ranges of regions [beg_region, end_region).
2443 void
2444 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2445 SpaceId space_id,
2446 size_t beg_region,
2447 size_t end_region) {
2448 ParallelCompactData& sd = summary_data();
2449 ParMarkBitMap* const mbm = mark_bitmap();
2451 HeapWord* beg_addr = sd.region_to_addr(beg_region);
2452 HeapWord* const end_addr = sd.region_to_addr(end_region);
2453 assert(beg_region <= end_region, "bad region range");
2454 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2456 #ifdef ASSERT
2457 // Claim the regions to avoid triggering an assert when they are marked as
2458 // filled.
2459 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2460 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2461 }
2462 #endif // #ifdef ASSERT
2464 if (beg_addr != space(space_id)->bottom()) {
2465 // Find the first live object or block of dead space that *starts* in this
2466 // range of regions. If a partial object crosses onto the region, skip it;
2467 // it will be marked for 'deferred update' when the object head is
2468 // processed. If dead space crosses onto the region, it is also skipped; it
2469 // will be filled when the prior region is processed. If neither of those
2470 // apply, the first word in the region is the start of a live object or dead
2471 // space.
2472 assert(beg_addr > space(space_id)->bottom(), "sanity");
2473 const RegionData* const cp = sd.region(beg_region);
2474 if (cp->partial_obj_size() != 0) {
2475 beg_addr = sd.partial_obj_end(beg_region);
2476 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2477 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2478 }
2479 }
2481 if (beg_addr < end_addr) {
2482 // A live object or block of dead space starts in this range of Regions.
2483 HeapWord* const dense_prefix_end = dense_prefix(space_id);
2485 // Create closures and iterate.
2486 UpdateOnlyClosure update_closure(mbm, cm, space_id);
2487 FillClosure fill_closure(cm, space_id);
2488 ParMarkBitMap::IterationStatus status;
2489 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2490 dense_prefix_end);
2491 if (status == ParMarkBitMap::incomplete) {
2492 update_closure.do_addr(update_closure.source());
2493 }
2494 }
2496 // Mark the regions as filled.
2497 RegionData* const beg_cp = sd.region(beg_region);
2498 RegionData* const end_cp = sd.region(end_region);
2499 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2500 cp->set_completed();
2501 }
2502 }
2504 // Return the SpaceId for the space containing addr. If addr is not in the
2505 // heap, last_space_id is returned. In debug mode it expects the address to be
2506 // in the heap and asserts such.
2507 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2508 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
2510 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
2511 if (_space_info[id].space()->contains(addr)) {
2512 return SpaceId(id);
2513 }
2514 }
2516 assert(false, "no space contains the addr");
2517 return last_space_id;
2518 }
2520 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2521 SpaceId id) {
2522 assert(id < last_space_id, "bad space id");
2524 ParallelCompactData& sd = summary_data();
2525 const SpaceInfo* const space_info = _space_info + id;
2526 ObjectStartArray* const start_array = space_info->start_array();
2528 const MutableSpace* const space = space_info->space();
2529 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2530 HeapWord* const beg_addr = space_info->dense_prefix();
2531 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2533 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2534 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2535 const RegionData* cur_region;
2536 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2537 HeapWord* const addr = cur_region->deferred_obj_addr();
2538 if (addr != NULL) {
2539 if (start_array != NULL) {
2540 start_array->allocate_block(addr);
2541 }
2542 oop(addr)->update_contents(cm);
2543 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
2544 }
2545 }
2546 }
2548 // Skip over count live words starting from beg, and return the address of the
2549 // next live word. Unless marked, the word corresponding to beg is assumed to
2550 // be dead. Callers must either ensure beg does not correspond to the middle of
2551 // an object, or account for those live words in some other way. Callers must
2552 // also ensure that there are enough live words in the range [beg, end) to skip.
2553 HeapWord*
2554 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2555 {
2556 assert(count > 0, "sanity");
2558 ParMarkBitMap* m = mark_bitmap();
2559 idx_t bits_to_skip = m->words_to_bits(count);
2560 idx_t cur_beg = m->addr_to_bit(beg);
2561 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2563 do {
2564 cur_beg = m->find_obj_beg(cur_beg, search_end);
2565 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2566 const size_t obj_bits = cur_end - cur_beg + 1;
2567 if (obj_bits > bits_to_skip) {
2568 return m->bit_to_addr(cur_beg + bits_to_skip);
2569 }
2570 bits_to_skip -= obj_bits;
2571 cur_beg = cur_end + 1;
2572 } while (bits_to_skip > 0);
2574 // Skipping the desired number of words landed just past the end of an object.
2575 // Find the start of the next object.
2576 cur_beg = m->find_obj_beg(cur_beg, search_end);
2577 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2578 return m->bit_to_addr(cur_beg);
2579 }
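// A small worked example (assuming, for the sketch, one bitmap bit per heap
// word): with live objects at bits [4, 7] and [12, 15] and count = 6, the
// first object supplies 4 live words, leaving bits_to_skip = 2, so the
// result is bit_to_addr(12 + 2), the third live word of the second object.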
2581 HeapWord*
2582 PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2583 size_t src_region_idx)
2584 {
2585 ParMarkBitMap* const bitmap = mark_bitmap();
2586 const ParallelCompactData& sd = summary_data();
2587 const size_t RegionSize = ParallelCompactData::RegionSize;
2589 assert(sd.is_region_aligned(dest_addr), "not aligned");
2591 const RegionData* const src_region_ptr = sd.region(src_region_idx);
2592 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2593 HeapWord* const src_region_destination = src_region_ptr->destination();
2595 assert(dest_addr >= src_region_destination, "wrong src region");
2596 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2598 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2599 HeapWord* const src_region_end = src_region_beg + RegionSize;
2601 HeapWord* addr = src_region_beg;
2602 if (dest_addr == src_region_destination) {
2603 // Return the first live word in the source region.
2604 if (partial_obj_size == 0) {
2605 addr = bitmap->find_obj_beg(addr, src_region_end);
2606 assert(addr < src_region_end, "no objects start in src region");
2607 }
2608 return addr;
2609 }
2611 // Must skip some live data.
2612 size_t words_to_skip = dest_addr - src_region_destination;
2613 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2615 if (partial_obj_size >= words_to_skip) {
2616 // All the live words to skip are part of the partial object.
2617 addr += words_to_skip;
2618 if (partial_obj_size == words_to_skip) {
2619 // Find the first live word past the partial object.
2620 addr = bitmap->find_obj_beg(addr, src_region_end);
2621 assert(addr < src_region_end, "wrong src region");
2622 }
2623 return addr;
2624 }
2626 // Skip over the partial object (if any).
2627 if (partial_obj_size != 0) {
2628 words_to_skip -= partial_obj_size;
2629 addr += partial_obj_size;
2630 }
2632 // Skip over live words due to objects that start in the region.
2633 addr = skip_live_words(addr, src_region_end, words_to_skip);
2634 assert(addr < src_region_end, "wrong src region");
2635 return addr;
2636 }
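// The cases above in a sketch (illustrative word counts): if the source
// region's partial object is 20 words and dest_addr is 50 words past the
// region's destination, the partial object covers the first 20 words to
// skip and skip_live_words() advances past the remaining 30. Had
// words_to_skip been 15, the result would simply be src_region_beg + 15,
// inside the partial object.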
2638 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2639 size_t beg_region,
2640 HeapWord* end_addr)
2641 {
2642 ParallelCompactData& sd = summary_data();
2643 RegionData* const beg = sd.region(beg_region);
2644 HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
2645 RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
2646 size_t cur_idx = beg_region;
2647 for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
2648 assert(cur->data_size() > 0, "region must have live data");
2649 cur->decrement_destination_count();
2650 if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
2651 cm->save_for_processing(cur_idx);
2652 }
2653 }
2654 }
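// A note on the claiming test above: a region can be reused as a destination
// only once its own live data has been copied out, i.e., once its
// destination count reaches zero. A source region whose data is split across
// two destination regions starts with a count of 2 and is decremented once
// by each consumer, so only the second decrement can make it available() and
// claimable here.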
2656 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2657 SpaceId& src_space_id,
2658 HeapWord*& src_space_top,
2659 HeapWord* end_addr)
2660 {
2661 typedef ParallelCompactData::RegionData RegionData;
2663 ParallelCompactData& sd = PSParallelCompact::summary_data();
2664 const size_t region_size = ParallelCompactData::RegionSize;
2666 size_t src_region_idx = 0;
2668 // Skip empty regions (if any) up to the top of the space.
2669 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2670 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2671 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2672 const RegionData* const top_region_ptr =
2673 sd.addr_to_region_ptr(top_aligned_up);
2674 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2675 ++src_region_ptr;
2676 }
2678 if (src_region_ptr < top_region_ptr) {
2679 // The next source region is in the current space. Update src_region_idx
2680 // and the source address to match src_region_ptr.
2681 src_region_idx = sd.region(src_region_ptr);
2682 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2683 if (src_region_addr > closure.source()) {
2684 closure.set_source(src_region_addr);
2685 }
2686 return src_region_idx;
2687 }
2689 // Switch to a new source space and find the first non-empty region.
2690 unsigned int space_id = src_space_id + 1;
2691 assert(space_id < last_space_id, "not enough spaces");
2693 HeapWord* const destination = closure.destination();
2695 do {
2696 MutableSpace* space = _space_info[space_id].space();
2697 HeapWord* const bottom = space->bottom();
2698 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2700 // Iterate over the spaces that do not compact into themselves.
2701 if (bottom_cp->destination() != bottom) {
2702 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2703 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2705 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2706 if (src_cp->live_obj_size() > 0) {
2707 // Found it.
2708 assert(src_cp->destination() == destination,
2709 "first live obj in the space must match the destination");
2710 assert(src_cp->partial_obj_size() == 0,
2711 "a space cannot begin with a partial obj");
2713 src_space_id = SpaceId(space_id);
2714 src_space_top = space->top();
2715 const size_t src_region_idx = sd.region(src_cp);
2716 closure.set_source(sd.region_to_addr(src_region_idx));
2717 return src_region_idx;
2718 } else {
2719 assert(src_cp->data_size() == 0, "sanity");
2720 }
2721 }
2722 }
2723 } while (++space_id < last_space_id);
2725 assert(false, "no source region was found");
2726 return 0;
2727 }
2729 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2730 {
2731 typedef ParMarkBitMap::IterationStatus IterationStatus;
2732 const size_t RegionSize = ParallelCompactData::RegionSize;
2733 ParMarkBitMap* const bitmap = mark_bitmap();
2734 ParallelCompactData& sd = summary_data();
2735 RegionData* const region_ptr = sd.region(region_idx);
2737 // Get the items needed to construct the closure.
2738 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2739 SpaceId dest_space_id = space_id(dest_addr);
2740 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2741 HeapWord* new_top = _space_info[dest_space_id].new_top();
2742 assert(dest_addr < new_top, "sanity");
2743 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2745 // Get the source region and related info.
2746 size_t src_region_idx = region_ptr->source_region();
2747 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2748 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2750 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2751 closure.set_source(first_src_addr(dest_addr, src_region_idx));
2753 // Adjust src_region_idx to prepare for decrementing destination counts (the
2754 // destination count is not decremented when a region is copied to itself).
2755 if (src_region_idx == region_idx) {
2756 src_region_idx += 1;
2757 }
2759 if (bitmap->is_unmarked(closure.source())) {
2760 // The first source word is in the middle of an object; copy the remainder
2761 // of the object or as much as will fit. The fact that pointer updates were
2762 // deferred will be noted when the object header is processed.
2763 HeapWord* const old_src_addr = closure.source();
2764 closure.copy_partial_obj();
2765 if (closure.is_full()) {
2766 decrement_destination_counts(cm, src_region_idx, closure.source());
2767 region_ptr->set_deferred_obj_addr(NULL);
2768 region_ptr->set_completed();
2769 return;
2770 }
2772 HeapWord* const end_addr = sd.region_align_down(closure.source());
2773 if (sd.region_align_down(old_src_addr) != end_addr) {
2774 // The partial object was copied from more than one source region.
2775 decrement_destination_counts(cm, src_region_idx, end_addr);
2777 // Move to the next source region, possibly switching spaces as well. All
2778 // args except end_addr may be modified.
2779 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2780 end_addr);
2781 }
2782 }
2784 do {
2785 HeapWord* const cur_addr = closure.source();
2786 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2787 src_space_top);
2788 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2790 if (status == ParMarkBitMap::incomplete) {
2791 // The last obj that starts in the source region does not end in the
2792 // region.
2793 assert(closure.source() < end_addr, "sanity");
2794 HeapWord* const obj_beg = closure.source();
2795 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2796 src_space_top);
2797 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2798 if (obj_end < range_end) {
2799 // The end was found; the entire object will fit.
2800 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2801 assert(status != ParMarkBitMap::would_overflow, "sanity");
2802 } else {
2803 // The end was not found; the object will not fit.
2804 assert(range_end < src_space_top, "obj cannot cross space boundary");
2805 status = ParMarkBitMap::would_overflow;
2806 }
2807 }
2809 if (status == ParMarkBitMap::would_overflow) {
2810 // The last object did not fit. Note that interior oop updates were
2811 // deferred, then copy enough of the object to fill the region.
2812 region_ptr->set_deferred_obj_addr(closure.destination());
2813 status = closure.copy_until_full(); // copies from closure.source()
2815 decrement_destination_counts(cm, src_region_idx, closure.source());
2816 region_ptr->set_completed();
2817 return;
2818 }
2820 if (status == ParMarkBitMap::full) {
2821 decrement_destination_counts(cm, src_region_idx, closure.source());
2822 region_ptr->set_deferred_obj_addr(NULL);
2823 region_ptr->set_completed();
2824 return;
2825 }
2827 decrement_destination_counts(cm, src_region_idx, end_addr);
2829 // Move to the next source region, possibly switching spaces as well. All
2830 // args except end_addr may be modified.
2831 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2832 end_addr);
2833 } while (true);
2834 }
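// fill_region() in outline: build a closure bounded by the destination
// region's free words, position the source at the first live word, then
// repeatedly iterate the mark bitmap. Three outcomes are handled each pass:
// the destination fills exactly (full), the next object will not fit
// (would_overflow: defer its interior updates and copy what fits), or the
// source region is exhausted (incomplete: advance to the next source region,
// possibly in another space, and continue).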
2836 void
2837 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
2838 const MutableSpace* sp = space(space_id);
2839 if (sp->is_empty()) {
2840 return;
2841 }
2843 ParallelCompactData& sd = PSParallelCompact::summary_data();
2844 ParMarkBitMap* const bitmap = mark_bitmap();
2845 HeapWord* const dp_addr = dense_prefix(space_id);
2846 HeapWord* beg_addr = sp->bottom();
2847 HeapWord* end_addr = sp->top();
2849 #ifdef ASSERT
2850 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
2851 if (cm->should_verify_only()) {
2852 VerifyUpdateClosure verify_update(cm, sp);
2853 bitmap->iterate(&verify_update, beg_addr, end_addr);
2854 return;
2855 }
2857 if (cm->should_reset_only()) {
2858 ResetObjectsClosure reset_objects(cm);
2859 bitmap->iterate(&reset_objects, beg_addr, end_addr);
2860 return;
2861 }
2862 #endif
2864 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
2865 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
2866 if (beg_region < dp_region) {
2867 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
2868 }
2870 // The destination of the first live object that starts in the region is one
2871 // past the end of the partial object entering the region (if any).
2872 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
2873 HeapWord* const new_top = _space_info[space_id].new_top();
2874 assert(new_top >= dest_addr, "bad new_top value");
2875 const size_t words = pointer_delta(new_top, dest_addr);
2877 if (words > 0) {
2878 ObjectStartArray* start_array = _space_info[space_id].start_array();
2879 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2881 ParMarkBitMap::IterationStatus status;
2882 status = bitmap->iterate(&closure, dest_addr, end_addr);
2883 assert(status == ParMarkBitMap::full, "iteration not complete");
2884 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
2885 "live objects skipped because closure is full");
2886 }
2887 }
2889 jlong PSParallelCompact::millis_since_last_gc() {
2890 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
2891 // XXX See note in genCollectedHeap::millis_since_last_gc().
2892 if (ret_val < 0) {
2893 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
2894 return 0;
2895 }
2896 return ret_val;
2897 }
2899 void PSParallelCompact::reset_millis_since_last_gc() {
2900 _time_of_last_gc = os::javaTimeMillis();
2901 }
2903 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
2904 {
2905 if (source() != destination()) {
2906 assert(source() > destination(), "must copy to the left");
2907 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
2908 }
2909 update_state(words_remaining());
2910 assert(is_full(), "sanity");
2911 return ParMarkBitMap::full;
2912 }
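// The "must copy to the left" assertion holds because this collector only
// moves data toward lower addresses: a destination region only draws from
// source regions at the same or higher addresses, so any overlapping copy
// has destination below source, which aligned_conjoint_words handles
// correctly.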

void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial
  // object that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
    assert(source() > destination(), "must copy to the left");
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
}
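
// A worked example of the clamping in copy_partial_obj() (hypothetical
// numbers): if words_remaining() is 300 but the bitmap shows the partial
// object ending 120 words past source(), find_obj_end() returns an address
// below range_end and only obj_size() == 120 words are copied; the other 180
// words of destination space are left for the objects that follow.  If the
// partial object instead extends beyond source() + 300 words, all 300
// remaining words are copied and the closure becomes full.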

ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != NULL, "sanity");
  assert(bitmap()->obj_size(addr) == words, "bad size");

  _source = addr;
  assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
         destination(), "wrong destination");

  if (words > words_remaining()) {
    return ParMarkBitMap::would_overflow;
  }

  // The start_array must be updated even if the object is not moving.
  if (_start_array != NULL) {
    _start_array->allocate_block(destination());
  }

  if (destination() != source()) {
    assert(destination() < source(), "must copy to the left");
    Copy::aligned_conjoint_words(source(), destination(), words);
  }

  oop moved_oop = (oop) destination();
  moved_oop->update_contents(compaction_manager());
  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");

  update_state(words);
  assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
  return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
}
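
// The return values of do_addr() form a small protocol with
// ParMarkBitMap::iterate().  A minimal sketch of the caller's side
// (simplified; not the actual iterate() implementation):
//
//   while (there is another live object in the range) {
//     IterationStatus status = closure->do_addr(obj_beg, obj_size);
//     if (status != ParMarkBitMap::incomplete) return status;
//   }
//   return ParMarkBitMap::complete;
//
// would_overflow tells the caller that the current object does not fit in the
// destination words remaining (with _source marking where to resume); full
// means the destination has been exactly filled.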

UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                     ParCompactionManager* cm,
                                     PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(mbm, cm),
  _space_id(space_id),
  _start_array(PSParallelCompact::start_array(space_id))
{
}

// Updates the references in the object to their new values.
ParMarkBitMapClosure::IterationStatus
UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
  do_addr(addr);
  return ParMarkBitMap::incomplete;
}

// Verify the new location using the forwarding pointer
// from MarkSweep::mark_sweep_phase2().
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  // The second arg (words) is not used.
  oop obj = (oop) addr;
  HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
  HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
  if (forwarding_ptr == NULL) {
    // The object is dead or not moving.
    assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
           "Object liveness is wrong.");
    return ParMarkBitMap::incomplete;
  }
  assert(UseParallelOldGCDensePrefix ||
         (HeapMaximumCompactionInterval > 1) ||
         (MarkSweepAlwaysCompactCount > 1) ||
         (forwarding_ptr == new_pointer),
         "Calculation of new location is incorrect");
  return ParMarkBitMap::incomplete;
}

// Reset objects modified for debug checking: set each mark word back to its
// initial value.
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
  // The second arg (words) is not used.
  oop obj = (oop) addr;
  obj->init_mark();
  return ParMarkBitMap::incomplete;
}

// Prepare for compaction.  This method is executed once
// (i.e., by a single thread) before compaction.
// Save the updated location of the intArrayKlassObj for
// filling holes in the dense prefix.
void PSParallelCompact::compact_prologue() {
  _updated_int_array_klass_obj = (klassOop)
    summary_data().calc_new_pointer(Universe::intArrayKlassObj());
}
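
// Background for the prologue above: dead space ("holes") in the dense prefix
// is not squeezed out; it is overwritten with dummy int-array filler objects
// so that the heap stays parseable.  Each filler's klass field must point at
// intArrayKlassObj's post-compaction location, because the klass itself lives
// in the perm gen and may move; that is why the updated pointer is computed
// once here and cached in _updated_int_array_klass_obj.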

// The initial implementation of this method created a field
// _next_compaction_space_id in SpaceInfo and initialized that field in
// SpaceInfo::initialize_space_info().  That required _next_compaction_space_id
// to be declared as a SpaceId in SpaceInfo, which in turn required SpaceId to
// be declared either in a separate class of its own or in SpaceInfo.  Declaring
// it in SpaceInfo did not seem consistent (it does not really fit there
// logically), and defining a separate class just to hold SpaceId seemed
// excessive.  This implementation is simple and localizes the knowledge.

PSParallelCompact::SpaceId
PSParallelCompact::next_compaction_space_id(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  switch (id) {
    case perm_space_id :
      return last_space_id;
    case old_space_id :
      return eden_space_id;
    case eden_space_id :
      return from_space_id;
    case from_space_id :
      return to_space_id;
    case to_space_id :
      return last_space_id;
    default:
      assert(false, "Bad space id");
      return last_space_id;
  }
}
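
// A minimal usage sketch for next_compaction_space_id() (hypothetical, for
// illustration only): walking the compactible spaces in order, starting with
// the old generation:
//
//   for (SpaceId id = old_space_id; id != last_space_id;
//        id = next_compaction_space_id(id)) {
//     // visit space(id): old, then eden, then from, then to
//   }
//
// perm_space_id and to_space_id both map to last_space_id, so either one
// terminates the chain.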