Thu, 11 Dec 2008 12:05:14 -0800
6765745: par compact - allow young gen spaces to be split
Reviewed-by: jmasa
1 /*
2 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_psParallelCompact.cpp.incl"
28 #include <math.h>
30 // All sizes are in HeapWords.
31 const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
32 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
33 const size_t ParallelCompactData::RegionSizeBytes =
34 RegionSize << LogHeapWordSize;
35 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
36 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
37 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
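// A worked example (illustrative, assuming a 64-bit VM where a HeapWord is
// 8 bytes, i.e., LogHeapWordSize == 3): RegionSize is 1 << 9 == 512 words,
// RegionSizeBytes is 512 << 3 == 4096 bytes, RegionSizeOffsetMask is 0x1ff
// (the word offset within a region) and RegionAddrOffsetMask is 0xfff (the
// byte offset within a region). On a 32-bit VM the same 512-word region
// covers 2048 bytes.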
39 const ParallelCompactData::RegionData::region_sz_t
40 ParallelCompactData::RegionData::dc_shift = 27;
42 const ParallelCompactData::RegionData::region_sz_t
43 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
45 const ParallelCompactData::RegionData::region_sz_t
46 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
48 const ParallelCompactData::RegionData::region_sz_t
49 ParallelCompactData::RegionData::los_mask = ~dc_mask;
51 const ParallelCompactData::RegionData::region_sz_t
52 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
54 const ParallelCompactData::RegionData::region_sz_t
55 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
57 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
58 bool PSParallelCompact::_print_phases = false;
60 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
61 klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
63 double PSParallelCompact::_dwl_mean;
64 double PSParallelCompact::_dwl_std_dev;
65 double PSParallelCompact::_dwl_first_term;
66 double PSParallelCompact::_dwl_adjustment;
67 #ifdef ASSERT
68 bool PSParallelCompact::_dwl_initialized = false;
69 #endif // #ifdef ASSERT
71 #ifdef VALIDATE_MARK_SWEEP
72 GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
73 GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
74 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
75 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
76 size_t PSParallelCompact::_live_oops_index = 0;
77 size_t PSParallelCompact::_live_oops_index_at_perm = 0;
78 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
79 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
80 bool PSParallelCompact::_pointer_tracking = false;
81 bool PSParallelCompact::_root_tracking = true;
83 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops = NULL;
84 GrowableArray<HeapWord*>* PSParallelCompact::_cur_gc_live_oops_moved_to = NULL;
85 GrowableArray<size_t> * PSParallelCompact::_cur_gc_live_oops_size = NULL;
86 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
87 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
88 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
89 #endif
91 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
92 HeapWord* destination)
93 {
94 assert(src_region_idx != 0, "invalid src_region_idx");
95 assert(partial_obj_size != 0, "invalid partial_obj_size argument");
96 assert(destination != NULL, "invalid destination argument");
98 _src_region_idx = src_region_idx;
99 _partial_obj_size = partial_obj_size;
100 _destination = destination;
102 // These fields may not be updated below, so make sure they're clear.
103 assert(_dest_region_addr == NULL, "should have been cleared");
104 assert(_first_src_addr == NULL, "should have been cleared");
106 // Determine the number of destination regions for the partial object.
107 HeapWord* const last_word = destination + partial_obj_size - 1;
108 const ParallelCompactData& sd = PSParallelCompact::summary_data();
109 HeapWord* const beg_region_addr = sd.region_align_down(destination);
110 HeapWord* const end_region_addr = sd.region_align_down(last_word);
112 if (beg_region_addr == end_region_addr) {
113 // One destination region.
114 _destination_count = 1;
115 if (end_region_addr == destination) {
116 // The destination falls on a region boundary, thus the first word of the
117 // partial object will be the first word copied to the destination region.
118 _dest_region_addr = end_region_addr;
119 _first_src_addr = sd.region_to_addr(src_region_idx);
120 }
121 } else {
122 // Two destination regions. When copied, the partial object will cross a
123 // destination region boundary, so a word somewhere within the partial
124 // object will be the first word copied to the second destination region.
125 _destination_count = 2;
126 _dest_region_addr = end_region_addr;
127 const size_t ofs = pointer_delta(end_region_addr, destination);
128 assert(ofs < _partial_obj_size, "sanity");
129 _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
130 }
131 }
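// An illustrative case for record() (hypothetical numbers, assuming the
// default 512-word RegionSize): if the partial object is 30 words long and
// its destination begins 500 words into a destination region, last_word
// falls in the next region, so _destination_count is 2; that second
// destination region begins 512 - 500 == 12 words into the partial object,
// so _first_src_addr is region_to_addr(src_region_idx) + 12.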
133 void SplitInfo::clear()
134 {
135 _src_region_idx = 0;
136 _partial_obj_size = 0;
137 _destination = NULL;
138 _destination_count = 0;
139 _dest_region_addr = NULL;
140 _first_src_addr = NULL;
141 assert(!is_valid(), "sanity");
142 }
144 #ifdef ASSERT
145 void SplitInfo::verify_clear()
146 {
147 assert(_src_region_idx == 0, "not clear");
148 assert(_partial_obj_size == 0, "not clear");
149 assert(_destination == NULL, "not clear");
150 assert(_destination_count == 0, "not clear");
151 assert(_dest_region_addr == NULL, "not clear");
152 assert(_first_src_addr == NULL, "not clear");
153 }
154 #endif // #ifdef ASSERT
157 #ifndef PRODUCT
158 const char* PSParallelCompact::space_names[] = {
159 "perm", "old ", "eden", "from", "to "
160 };
162 void PSParallelCompact::print_region_ranges()
163 {
164 tty->print_cr("space bottom top end new_top");
165 tty->print_cr("------ ---------- ---------- ---------- ----------");
167 for (unsigned int id = 0; id < last_space_id; ++id) {
168 const MutableSpace* space = _space_info[id].space();
169 tty->print_cr("%u %s "
170 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
171 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
172 id, space_names[id],
173 summary_data().addr_to_region_idx(space->bottom()),
174 summary_data().addr_to_region_idx(space->top()),
175 summary_data().addr_to_region_idx(space->end()),
176 summary_data().addr_to_region_idx(_space_info[id].new_top()));
177 }
178 }
180 void
181 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
182 {
183 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
184 #define REGION_DATA_FORMAT SIZE_FORMAT_W(5)
186 ParallelCompactData& sd = PSParallelCompact::summary_data();
187 size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
188 tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
189 REGION_IDX_FORMAT " " PTR_FORMAT " "
190 REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
191 REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
192 i, c->data_location(), dci, c->destination(),
193 c->partial_obj_size(), c->live_obj_size(),
194 c->data_size(), c->source_region(), c->destination_count());
196 #undef REGION_IDX_FORMAT
197 #undef REGION_DATA_FORMAT
198 }
200 void
201 print_generic_summary_data(ParallelCompactData& summary_data,
202 HeapWord* const beg_addr,
203 HeapWord* const end_addr)
204 {
205 size_t total_words = 0;
206 size_t i = summary_data.addr_to_region_idx(beg_addr);
207 const size_t last = summary_data.addr_to_region_idx(end_addr);
208 HeapWord* pdest = 0;
210 while (i <= last) {
211 ParallelCompactData::RegionData* c = summary_data.region(i);
212 if (c->data_size() != 0 || c->destination() != pdest) {
213 print_generic_summary_region(i, c);
214 total_words += c->data_size();
215 pdest = c->destination();
216 }
217 ++i;
218 }
220 tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
221 }
223 void
224 print_generic_summary_data(ParallelCompactData& summary_data,
225 SpaceInfo* space_info)
226 {
227 for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
228 const MutableSpace* space = space_info[id].space();
229 print_generic_summary_data(summary_data, space->bottom(),
230 MAX2(space->top(), space_info[id].new_top()));
231 }
232 }
234 void
235 print_initial_summary_region(size_t i,
236 const ParallelCompactData::RegionData* c,
237 bool newline = true)
238 {
239 tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
240 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
241 SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
242 i, c->destination(),
243 c->partial_obj_size(), c->live_obj_size(),
244 c->data_size(), c->source_region(), c->destination_count());
245 if (newline) tty->cr();
246 }
248 void
249 print_initial_summary_data(ParallelCompactData& summary_data,
250 const MutableSpace* space) {
251 if (space->top() == space->bottom()) {
252 return;
253 }
255 const size_t region_size = ParallelCompactData::RegionSize;
256 typedef ParallelCompactData::RegionData RegionData;
257 HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
258 const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
259 const RegionData* c = summary_data.region(end_region - 1);
260 HeapWord* end_addr = c->destination() + c->data_size();
261 const size_t live_in_space = pointer_delta(end_addr, space->bottom());
263 // Print (and count) the full regions at the beginning of the space.
264 size_t full_region_count = 0;
265 size_t i = summary_data.addr_to_region_idx(space->bottom());
266 while (i < end_region && summary_data.region(i)->data_size() == region_size) {
267 print_initial_summary_region(i, summary_data.region(i));
268 ++full_region_count;
269 ++i;
270 }
272 size_t live_to_right = live_in_space - full_region_count * region_size;
274 double max_reclaimed_ratio = 0.0;
275 size_t max_reclaimed_ratio_region = 0;
276 size_t max_dead_to_right = 0;
277 size_t max_live_to_right = 0;
279 // Print the 'reclaimed ratio' for regions while there is something live in
280 // the region or to the right of it. The remaining regions are empty (and
281 // uninteresting), and computing the ratio will result in division by 0.
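// For example (hypothetical numbers): if 10000 used words lie at or above
// this region's start and 4000 of them are live, dead_to_right is 6000 and
// the printed ratio is 6000 / 4000 == 1.5.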
282 while (i < end_region && live_to_right > 0) {
283 c = summary_data.region(i);
284 HeapWord* const region_addr = summary_data.region_to_addr(i);
285 const size_t used_to_right = pointer_delta(space->top(), region_addr);
286 const size_t dead_to_right = used_to_right - live_to_right;
287 const double reclaimed_ratio = double(dead_to_right) / live_to_right;
289 if (reclaimed_ratio > max_reclaimed_ratio) {
290 max_reclaimed_ratio = reclaimed_ratio;
291 max_reclaimed_ratio_region = i;
292 max_dead_to_right = dead_to_right;
293 max_live_to_right = live_to_right;
294 }
296 print_initial_summary_region(i, c, false);
297 tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
298 reclaimed_ratio, dead_to_right, live_to_right);
300 live_to_right -= c->data_size();
301 ++i;
302 }
304 // Any remaining regions are empty. Print one more if there is one.
305 if (i < end_region) {
306 print_initial_summary_region(i, summary_data.region(i));
307 }
309 tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
310 "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
311 max_reclaimed_ratio_region, max_dead_to_right,
312 max_live_to_right, max_reclaimed_ratio);
313 }
315 void
316 print_initial_summary_data(ParallelCompactData& summary_data,
317 SpaceInfo* space_info) {
318 unsigned int id = PSParallelCompact::perm_space_id;
319 const MutableSpace* space;
320 do {
321 space = space_info[id].space();
322 print_initial_summary_data(summary_data, space);
323 } while (++id < PSParallelCompact::eden_space_id);
325 do {
326 space = space_info[id].space();
327 print_generic_summary_data(summary_data, space->bottom(), space->top());
328 } while (++id < PSParallelCompact::last_space_id);
329 }
330 #endif // #ifndef PRODUCT
332 #ifdef ASSERT
333 size_t add_obj_count;
334 size_t add_obj_size;
335 size_t mark_bitmap_count;
336 size_t mark_bitmap_size;
337 #endif // #ifdef ASSERT
339 ParallelCompactData::ParallelCompactData()
340 {
341 _region_start = 0;
343 _region_vspace = 0;
344 _region_data = 0;
345 _region_count = 0;
346 }
348 bool ParallelCompactData::initialize(MemRegion covered_region)
349 {
350 _region_start = covered_region.start();
351 const size_t region_size = covered_region.word_size();
352 DEBUG_ONLY(_region_end = _region_start + region_size;)
354 assert(region_align_down(_region_start) == _region_start,
355 "region start not aligned");
356 assert((region_size & RegionSizeOffsetMask) == 0,
357 "region size not a multiple of RegionSize");
359 bool result = initialize_region_data(region_size);
361 return result;
362 }
364 PSVirtualSpace*
365 ParallelCompactData::create_vspace(size_t count, size_t element_size)
366 {
367 const size_t raw_bytes = count * element_size;
368 const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
369 const size_t granularity = os::vm_allocation_granularity();
370 const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
372 const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
373 MAX2(page_sz, granularity);
374 ReservedSpace rs(bytes, rs_align, rs_align > 0);
375 os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
376 rs.size());
377 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
378 if (vspace != 0) {
379 if (vspace->expand_by(bytes)) {
380 return vspace;
381 }
382 delete vspace;
383 // Release memory reserved in the space.
384 rs.release();
385 }
387 return 0;
388 }
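// A worked sizing example for initialize_region_data() (illustrative):
// covering 2^30 heap words (an 8 GB heap with 8-byte HeapWords) requires
// (2^30 + 0x1ff) >> 9 == 2^21 RegionData entries, i.e., one entry per
// 512-word region, with the rounding term covering a partial trailing region.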
390 bool ParallelCompactData::initialize_region_data(size_t region_size)
391 {
392 const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
393 _region_vspace = create_vspace(count, sizeof(RegionData));
394 if (_region_vspace != 0) {
395 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
396 _region_count = count;
397 return true;
398 }
399 return false;
400 }
402 void ParallelCompactData::clear()
403 {
404 memset(_region_data, 0, _region_vspace->committed_size());
405 }
407 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
408 assert(beg_region <= _region_count, "beg_region out of range");
409 assert(end_region <= _region_count, "end_region out of range");
411 const size_t region_cnt = end_region - beg_region;
412 memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
413 }
415 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
416 {
417 const RegionData* cur_cp = region(region_idx);
418 const RegionData* const end_cp = region(region_count() - 1);
420 HeapWord* result = region_to_addr(region_idx);
421 if (cur_cp < end_cp) {
422 do {
423 result += cur_cp->partial_obj_size();
424 } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
425 }
426 return result;
427 }
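// An illustrative partial_obj_end() walk (hypothetical numbers): if region 12
// has partial_obj_size == 512 (completely covered) and region 13 has
// partial_obj_size == 37, then partial_obj_end(12) returns
// region_to_addr(12) + 512 + 37, the first word past the object that extends
// onto region 12.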
429 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
430 {
431 const size_t obj_ofs = pointer_delta(addr, _region_start);
432 const size_t beg_region = obj_ofs >> Log2RegionSize;
433 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
435 DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
436 DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
438 if (beg_region == end_region) {
439 // All in one region.
440 _region_data[beg_region].add_live_obj(len);
441 return;
442 }
444 // First region.
445 const size_t beg_ofs = region_offset(addr);
446 _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
448 klassOop klass = ((oop)addr)->klass();
449 // Middle regions--completely spanned by this object.
450 for (size_t region = beg_region + 1; region < end_region; ++region) {
451 _region_data[region].set_partial_obj_size(RegionSize);
452 _region_data[region].set_partial_obj_addr(addr);
453 }
455 // Last region.
456 const size_t end_ofs = region_offset(addr + len - 1);
457 _region_data[end_region].set_partial_obj_size(end_ofs + 1);
458 _region_data[end_region].set_partial_obj_addr(addr);
459 }
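// An illustrative add_obj() call (hypothetical numbers, 512-word regions):
// an object of len == 1200 words starting 100 words into region 7 spans
// regions 7..9. Region 7 gets add_live_obj(512 - 100) == 412 words, region 8
// (completely spanned) gets partial_obj_size == 512, and region 9 gets
// partial_obj_size == end_ofs + 1 == 276 words; both partial regions record
// addr as their partial_obj_addr.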
461 void
462 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
463 {
464 assert(region_offset(beg) == 0, "not RegionSize aligned");
465 assert(region_offset(end) == 0, "not RegionSize aligned");
467 size_t cur_region = addr_to_region_idx(beg);
468 const size_t end_region = addr_to_region_idx(end);
469 HeapWord* addr = beg;
470 while (cur_region < end_region) {
471 _region_data[cur_region].set_destination(addr);
472 _region_data[cur_region].set_destination_count(0);
473 _region_data[cur_region].set_source_region(cur_region);
474 _region_data[cur_region].set_data_location(addr);
476 // Update live_obj_size so the region appears completely full.
477 size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
478 _region_data[cur_region].set_live_obj_size(live_size);
480 ++cur_region;
481 addr += RegionSize;
482 }
483 }
485 // Find the point at which a space can be split and, if necessary, record the
486 // split point.
487 //
488 // If the current src region (which overflowed the destination space) doesn't
489 // have a partial object, the split point is at the beginning of the current src
490 // region (an "easy" split, no extra bookkeeping required).
491 //
492 // If the current src region has a partial object, the split point is in the
493 // region where that partial object starts (call it the split_region). If
494 // split_region has a partial object, then the split point is just after that
495 // partial object (a "hard" split where we have to record the split data and
496 // zero the partial_obj_size field). With a "hard" split, we know that the
497 // partial_obj ends within split_region because the partial object that caused
498 // the overflow starts in split_region. If split_region doesn't have a partial
499 // obj, then the split is at the beginning of split_region (another "easy"
500 // split).
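//
// A concrete sketch of the cases (hypothetical regions R and S): if R, the
// src region that overflowed the target space, has no partial object, the
// split is "easy" and copying simply resumes at region_to_addr(R). If R's
// partial object belongs to an object that starts in region S, and S itself
// begins with a (different) partial object, the split is "hard": S's
// partial_obj_size is zeroed, the SplitInfo records it, and copying resumes
// just after that partial object within S.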
501 HeapWord*
502 ParallelCompactData::summarize_split_space(size_t src_region,
503 SplitInfo& split_info,
504 HeapWord* destination,
505 HeapWord* target_end,
506 HeapWord** target_next)
507 {
508 assert(destination <= target_end, "sanity");
509 assert(destination + _region_data[src_region].data_size() > target_end,
510 "region should not fit into target space");
512 size_t split_region = src_region;
513 HeapWord* split_destination = destination;
514 size_t partial_obj_size = _region_data[src_region].partial_obj_size();
516 if (destination + partial_obj_size > target_end) {
517 // The split point is just after the partial object (if any) in the
518 // src_region that contains the start of the object that overflowed the
519 // destination space.
520 //
521 // Find the start of the "overflow" object and set split_region to the
522 // region containing it.
523 HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
524 split_region = addr_to_region_idx(overflow_obj);
526 // Clear the source_region field of all destination regions whose first word
527 // came from data after the split point (a non-null source_region field
528 // implies a region must be filled).
529 //
530 // An alternative to the simple loop below: clear during post_compact(),
531 // which uses memcpy instead of individual stores, and is easy to
532 // parallelize. (The downside is that it clears the entire RegionData
533 // object as opposed to just one field.)
534 //
535 // post_compact() would have to clear the summary data up to the highest
536 // address that was written during the summary phase, which would be
537 //
538 // max(top, max(new_top, clear_top))
539 //
540 // where clear_top is a new field in SpaceInfo. Would have to set clear_top
541 // to destination + partial_obj_size, where both have the values passed to
542 // this routine.
543 const RegionData* const sr = region(split_region);
544 const size_t beg_idx =
545 addr_to_region_idx(region_align_up(sr->destination() +
546 sr->partial_obj_size()));
547 const size_t end_idx =
548 addr_to_region_idx(region_align_up(destination + partial_obj_size));
550 if (TraceParallelOldGCSummaryPhase) {
551 gclog_or_tty->print_cr("split: clearing source_region field in ["
552 SIZE_FORMAT ", " SIZE_FORMAT ")",
553 beg_idx, end_idx);
554 }
555 for (size_t idx = beg_idx; idx < end_idx; ++idx) {
556 _region_data[idx].set_source_region(0);
557 }
559 // Set split_destination and partial_obj_size to reflect the split region.
560 split_destination = sr->destination();
561 partial_obj_size = sr->partial_obj_size();
562 }
564 // The split is recorded only if a partial object extends onto the region.
565 if (partial_obj_size != 0) {
566 _region_data[split_region].set_partial_obj_size(0);
567 split_info.record(split_region, partial_obj_size, split_destination);
568 }
570 // Set up the continuation addresses.
571 *target_next = split_destination + partial_obj_size;
572 HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
574 if (TraceParallelOldGCSummaryPhase) {
575 const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
576 gclog_or_tty->print_cr("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT
577 " pos=" SIZE_FORMAT,
578 split_type, source_next, split_region,
579 partial_obj_size);
580 gclog_or_tty->print_cr("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
581 " tn=" PTR_FORMAT,
582 split_type, split_destination,
583 addr_to_region_idx(split_destination),
584 *target_next);
586 if (partial_obj_size != 0) {
587 HeapWord* const po_beg = split_info.destination();
588 HeapWord* const po_end = po_beg + split_info.partial_obj_size();
589 gclog_or_tty->print_cr("%s split: "
590 "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
591 "po_end=" PTR_FORMAT " " SIZE_FORMAT,
592 split_type,
593 po_beg, addr_to_region_idx(po_beg),
594 po_end, addr_to_region_idx(po_end));
595 }
596 }
598 return source_next;
599 }
601 bool ParallelCompactData::summarize(SplitInfo& split_info,
602 HeapWord* source_beg, HeapWord* source_end,
603 HeapWord** source_next,
604 HeapWord* target_beg, HeapWord* target_end,
605 HeapWord** target_next)
606 {
607 if (TraceParallelOldGCSummaryPhase) {
608 HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
609 tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
610 "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
611 source_beg, source_end, source_next_val,
612 target_beg, target_end, *target_next);
613 }
615 size_t cur_region = addr_to_region_idx(source_beg);
616 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
618 HeapWord *dest_addr = target_beg;
619 while (cur_region < end_region) {
620 // The destination must be set even if the region has no data.
621 _region_data[cur_region].set_destination(dest_addr);
623 size_t words = _region_data[cur_region].data_size();
624 if (words > 0) {
625 // If cur_region does not fit entirely into the target space, find a point
626 // at which the source space can be 'split' so that part is copied to the
627 // target space and the rest is copied elsewhere.
628 if (dest_addr + words > target_end) {
629 assert(source_next != NULL, "source_next is NULL when splitting");
630 *source_next = summarize_split_space(cur_region, split_info, dest_addr,
631 target_end, target_next);
632 return false;
633 }
635 // Compute the destination_count for cur_region, and if necessary, update
636 // source_region for a destination region. The source_region field is
637 // updated if cur_region is the first (left-most) region to be copied to a
638 // destination region.
639 //
640 // The destination_count calculation is a bit subtle. A region that has
641 // data that compacts into itself does not count itself as a destination.
642 // This maintains the invariant that a zero count means the region is
643 // available and can be claimed and then filled.
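// For example (hypothetical): a region whose live words all compact into
// itself keeps destination_count == 0 and can later be claimed and filled
// immediately; a region whose data lands entirely in one other destination
// region counts 1; data that straddles a destination region boundary counts
// each destination region it reaches, except that a region never counts
// itself.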
644 uint destination_count = 0;
645 if (split_info.is_split(cur_region)) {
646 // The current region has been split: the partial object will be copied
647 // to one destination space and the remaining data will be copied to
648 // another destination space. Adjust the initial destination_count and,
649 // if necessary, set the source_region field if the partial object will
650 // cross a destination region boundary.
651 destination_count = split_info.destination_count();
652 if (destination_count == 2) {
653 size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
654 _region_data[dest_idx].set_source_region(cur_region);
655 }
656 }
658 HeapWord* const last_addr = dest_addr + words - 1;
659 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
660 const size_t dest_region_2 = addr_to_region_idx(last_addr);
662 // Initially assume that the destination regions will be the same and
663 // adjust the value below if necessary. Under this assumption, if
664 // cur_region == dest_region_2, then cur_region will be compacted
665 // completely into itself.
666 destination_count += cur_region == dest_region_2 ? 0 : 1;
667 if (dest_region_1 != dest_region_2) {
668 // Destination regions differ; adjust destination_count.
669 destination_count += 1;
670 // Data from cur_region will be copied to the start of dest_region_2.
671 _region_data[dest_region_2].set_source_region(cur_region);
672 } else if (region_offset(dest_addr) == 0) {
673 // Data from cur_region will be copied to the start of the destination
674 // region.
675 _region_data[dest_region_1].set_source_region(cur_region);
676 }
678 _region_data[cur_region].set_destination_count(destination_count);
679 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
680 dest_addr += words;
681 }
683 ++cur_region;
684 }
686 *target_next = dest_addr;
687 return true;
688 }
690 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
691 assert(addr != NULL, "Should detect NULL oop earlier");
692 assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
693 #ifdef ASSERT
694 if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
695 gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
696 }
697 #endif
698 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
700 // Region covering the object.
701 size_t region_index = addr_to_region_idx(addr);
702 const RegionData* const region_ptr = region(region_index);
703 HeapWord* const region_addr = region_align_down(addr);
705 assert(addr < region_addr + RegionSize, "Region does not cover object");
706 assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
708 HeapWord* result = region_ptr->destination();
710 // If all the data in the region is live, then the new location of the object
711 // can be calculated from the destination of the region plus the offset of the
712 // object in the region.
713 if (region_ptr->data_size() == RegionSize) {
714 result += pointer_delta(addr, region_addr);
715 return result;
716 }
718 // The new location of the object is
719 // region destination +
720 // size of the partial object extending onto the region +
721 // sizes of the live objects in the Region that are to the left of addr
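// For instance (hypothetical numbers): if the region's destination is D, a
// 40-word partial object extends onto the region, and 100 live words lie
// between the end of that partial object and addr, the new location is
// D + 140.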
722 const size_t partial_obj_size = region_ptr->partial_obj_size();
723 HeapWord* const search_start = region_addr + partial_obj_size;
725 const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
726 size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
728 result += partial_obj_size + live_to_left;
729 assert(result <= addr, "object cannot move to the right");
730 return result;
731 }
733 klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
734 klassOop updated_klass;
735 if (PSParallelCompact::should_update_klass(old_klass)) {
736 updated_klass = (klassOop) calc_new_pointer(old_klass);
737 } else {
738 updated_klass = old_klass;
739 }
741 return updated_klass;
742 }
744 #ifdef ASSERT
745 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
746 {
747 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
748 const size_t* const end = (const size_t*)vspace->committed_high_addr();
749 for (const size_t* p = beg; p < end; ++p) {
750 assert(*p == 0, "not zero");
751 }
752 }
754 void ParallelCompactData::verify_clear()
755 {
756 verify_clear(_region_vspace);
757 }
758 #endif // #ifdef ASSERT
760 #ifdef NOT_PRODUCT
761 ParallelCompactData::RegionData* debug_region(size_t region_index) {
762 ParallelCompactData& sd = PSParallelCompact::summary_data();
763 return sd.region(region_index);
764 }
765 #endif
767 elapsedTimer PSParallelCompact::_accumulated_time;
768 unsigned int PSParallelCompact::_total_invocations = 0;
769 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
770 jlong PSParallelCompact::_time_of_last_gc = 0;
771 CollectorCounters* PSParallelCompact::_counters = NULL;
772 ParMarkBitMap PSParallelCompact::_mark_bitmap;
773 ParallelCompactData PSParallelCompact::_summary_data;
775 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
777 void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
778 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
780 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
781 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
783 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
784 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
786 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
787 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
789 void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
791 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
792 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
794 void PSParallelCompact::post_initialize() {
795 ParallelScavengeHeap* heap = gc_heap();
796 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
798 MemRegion mr = heap->reserved_region();
799 _ref_processor = ReferenceProcessor::create_ref_processor(
800 mr, // span
801 true, // atomic_discovery
802 true, // mt_discovery
803 &_is_alive_closure,
804 ParallelGCThreads,
805 ParallelRefProcEnabled);
806 _counters = new CollectorCounters("PSParallelCompact", 1);
808 // Initialize static fields in ParCompactionManager.
809 ParCompactionManager::initialize(mark_bitmap());
810 }
812 bool PSParallelCompact::initialize() {
813 ParallelScavengeHeap* heap = gc_heap();
814 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
815 MemRegion mr = heap->reserved_region();
817 // Was the old gen allocated successfully?
818 if (!heap->old_gen()->is_allocated()) {
819 return false;
820 }
822 initialize_space_info();
823 initialize_dead_wood_limiter();
825 if (!_mark_bitmap.initialize(mr)) {
826 vm_shutdown_during_initialization("Unable to allocate bit map for "
827 "parallel garbage collection for the requested heap size.");
828 return false;
829 }
831 if (!_summary_data.initialize(mr)) {
832 vm_shutdown_during_initialization("Unable to allocate tables for "
833 "parallel garbage collection for the requested heap size.");
834 return false;
835 }
837 return true;
838 }
840 void PSParallelCompact::initialize_space_info()
841 {
842 memset(&_space_info, 0, sizeof(_space_info));
844 ParallelScavengeHeap* heap = gc_heap();
845 PSYoungGen* young_gen = heap->young_gen();
846 MutableSpace* perm_space = heap->perm_gen()->object_space();
848 _space_info[perm_space_id].set_space(perm_space);
849 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
850 _space_info[eden_space_id].set_space(young_gen->eden_space());
851 _space_info[from_space_id].set_space(young_gen->from_space());
852 _space_info[to_space_id].set_space(young_gen->to_space());
854 _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
855 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
857 _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
858 if (TraceParallelOldGCDensePrefix) {
859 tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
860 _space_info[perm_space_id].min_dense_prefix());
861 }
862 }
864 void PSParallelCompact::initialize_dead_wood_limiter()
865 {
866 const size_t max = 100;
867 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
868 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
869 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
870 DEBUG_ONLY(_dwl_initialized = true;)
871 _dwl_adjustment = normal_distribution(1.0);
872 }
874 // Simple class for storing info about the heap at the start of GC, to be used
875 // after GC for comparison/printing.
876 class PreGCValues {
877 public:
878 PreGCValues() { }
879 PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
881 void fill(ParallelScavengeHeap* heap) {
882 _heap_used = heap->used();
883 _young_gen_used = heap->young_gen()->used_in_bytes();
884 _old_gen_used = heap->old_gen()->used_in_bytes();
885 _perm_gen_used = heap->perm_gen()->used_in_bytes();
886 };
888 size_t heap_used() const { return _heap_used; }
889 size_t young_gen_used() const { return _young_gen_used; }
890 size_t old_gen_used() const { return _old_gen_used; }
891 size_t perm_gen_used() const { return _perm_gen_used; }
893 private:
894 size_t _heap_used;
895 size_t _young_gen_used;
896 size_t _old_gen_used;
897 size_t _perm_gen_used;
898 };
900 void
901 PSParallelCompact::clear_data_covering_space(SpaceId id)
902 {
903 // At this point, top is the value before GC, new_top() is the value that will
904 // be set at the end of GC. The marking bitmap is cleared to top; nothing
905 // should be marked above top. The summary data is cleared to the larger of
906 // top & new_top.
907 MutableSpace* const space = _space_info[id].space();
908 HeapWord* const bot = space->bottom();
909 HeapWord* const top = space->top();
910 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
912 const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
913 const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
914 _mark_bitmap.clear_range(beg_bit, end_bit);
916 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
917 const size_t end_region =
918 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
919 _summary_data.clear_range(beg_region, end_region);
921 // Clear the data used to 'split' regions.
922 SplitInfo& split_info = _space_info[id].split_info();
923 if (split_info.is_valid()) {
924 split_info.clear();
925 }
926 DEBUG_ONLY(split_info.verify_clear();)
927 }
929 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
930 {
931 // Update the from & to space pointers in space_info, since they are swapped
932 // at each young gen gc. Do the update unconditionally (even though a
933 // promotion failure does not swap spaces) because an unknown number of minor
934 // collections will have swapped the spaces an unknown number of times.
935 TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
936 ParallelScavengeHeap* heap = gc_heap();
937 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
938 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
940 pre_gc_values->fill(heap);
942 ParCompactionManager::reset();
943 NOT_PRODUCT(_mark_bitmap.reset_counters());
944 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
945 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
947 // Increment the invocation count
948 heap->increment_total_collections(true);
950 // We need to track unique mark sweep invocations as well.
951 _total_invocations++;
953 if (PrintHeapAtGC) {
954 Universe::print_heap_before_gc();
955 }
957 // Fill in TLABs
958 heap->accumulate_statistics_all_tlabs();
959 heap->ensure_parsability(true); // retire TLABs
961 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
962 HandleMark hm; // Discard invalid handles created during verification
963 gclog_or_tty->print(" VerifyBeforeGC:");
964 Universe::verify(true);
965 }
967 // Verify object start arrays
968 if (VerifyObjectStartArray &&
969 VerifyBeforeGC) {
970 heap->old_gen()->verify_object_start_array();
971 heap->perm_gen()->verify_object_start_array();
972 }
974 DEBUG_ONLY(mark_bitmap()->verify_clear();)
975 DEBUG_ONLY(summary_data().verify_clear();)
977 // Have worker threads release resources the next time they run a task.
978 gc_task_manager()->release_all_resources();
979 }
981 void PSParallelCompact::post_compact()
982 {
983 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
985 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
986 // Clear the marking bitmap, summary data and split info.
987 clear_data_covering_space(SpaceId(id));
988 // Update top(). Must be done after clearing the bitmap and summary data.
989 _space_info[id].publish_new_top();
990 }
992 MutableSpace* const eden_space = _space_info[eden_space_id].space();
993 MutableSpace* const from_space = _space_info[from_space_id].space();
994 MutableSpace* const to_space = _space_info[to_space_id].space();
996 ParallelScavengeHeap* heap = gc_heap();
997 bool eden_empty = eden_space->is_empty();
998 if (!eden_empty) {
999 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1000 heap->young_gen(), heap->old_gen());
1001 }
1003 // Update heap occupancy information which is used as input to the soft ref
1004 // clearing policy at the next gc.
1005 Universe::update_heap_info_at_gc();
1007 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1008 to_space->is_empty();
1010 BarrierSet* bs = heap->barrier_set();
1011 if (bs->is_a(BarrierSet::ModRef)) {
1012 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
1013 MemRegion old_mr = heap->old_gen()->reserved();
1014 MemRegion perm_mr = heap->perm_gen()->reserved();
1015 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
1017 if (young_gen_empty) {
1018 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
1019 } else {
1020 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
1021 }
1022 }
1024 Threads::gc_epilogue();
1025 CodeCache::gc_epilogue();
1027 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1029 ref_processor()->enqueue_discovered_references(NULL);
1031 if (ZapUnusedHeapArea) {
1032 heap->gen_mangle_unused_area();
1033 }
1035 // Update time of last GC
1036 reset_millis_since_last_gc();
1037 }
1039 HeapWord*
1040 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1041 bool maximum_compaction)
1042 {
1043 const size_t region_size = ParallelCompactData::RegionSize;
1044 const ParallelCompactData& sd = summary_data();
1046 const MutableSpace* const space = _space_info[id].space();
1047 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1048 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1049 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1051 // Skip full regions at the beginning of the space--they are necessarily part
1052 // of the dense prefix.
1053 size_t full_count = 0;
1054 const RegionData* cp;
1055 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1056 ++full_count;
1057 }
1059 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1060 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1061 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1062 if (maximum_compaction || cp == end_cp || interval_ended) {
1063 _maximum_compaction_gc_num = total_invocations();
1064 return sd.region_to_addr(cp);
1065 }
1067 HeapWord* const new_top = _space_info[id].new_top();
1068 const size_t space_live = pointer_delta(new_top, space->bottom());
1069 const size_t space_used = space->used_in_words();
1070 const size_t space_capacity = space->capacity_in_words();
1072 const double cur_density = double(space_live) / space_capacity;
1073 const double deadwood_density =
1074 (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1075 const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1077 if (TraceParallelOldGCDensePrefix) {
1078 tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1079 cur_density, deadwood_density, deadwood_goal);
1080 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1081 "space_cap=" SIZE_FORMAT,
1082 space_live, space_used,
1083 space_capacity);
1084 }
1086 // XXX - Use binary search?
1087 HeapWord* dense_prefix = sd.region_to_addr(cp);
1088 const RegionData* full_cp = cp;
1089 const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1090 while (cp < end_cp) {
1091 HeapWord* region_destination = cp->destination();
1092 const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1093 if (TraceParallelOldGCDensePrefix && Verbose) {
1094 tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1095 "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
1096 sd.region(cp), region_destination,
1097 dense_prefix, cur_deadwood);
1098 }
1100 if (cur_deadwood >= deadwood_goal) {
1101 // Found the region that has the correct amount of deadwood to the left.
1102 // This typically occurs after crossing a fairly sparse set of regions, so
1103 // iterate backwards over those sparse regions, looking for the region
1104 // that has the lowest density of live objects 'to the right.'
1105 size_t space_to_left = sd.region(cp) * region_size;
1106 size_t live_to_left = space_to_left - cur_deadwood;
1107 size_t space_to_right = space_capacity - space_to_left;
1108 size_t live_to_right = space_live - live_to_left;
1109 double density_to_right = double(live_to_right) / space_to_right;
1110 while (cp > full_cp) {
1111 --cp;
1112 const size_t prev_region_live_to_right = live_to_right -
1113 cp->data_size();
1114 const size_t prev_region_space_to_right = space_to_right + region_size;
1115 double prev_region_density_to_right =
1116 double(prev_region_live_to_right) / prev_region_space_to_right;
1117 if (density_to_right <= prev_region_density_to_right) {
1118 return dense_prefix;
1119 }
1120 if (TraceParallelOldGCDensePrefix && Verbose) {
1121 tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1122 "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1123 prev_region_density_to_right);
1124 }
1125 dense_prefix -= region_size;
1126 live_to_right = prev_region_live_to_right;
1127 space_to_right = prev_region_space_to_right;
1128 density_to_right = prev_region_density_to_right;
1129 }
1130 return dense_prefix;
1131 }
1133 dense_prefix += region_size;
1134 ++cp;
1135 }
1137 return dense_prefix;
1138 }
1140 #ifndef PRODUCT
1141 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1142 const SpaceId id,
1143 const bool maximum_compaction,
1144 HeapWord* const addr)
1145 {
1146 const size_t region_idx = summary_data().addr_to_region_idx(addr);
1147 RegionData* const cp = summary_data().region(region_idx);
1148 const MutableSpace* const space = _space_info[id].space();
1149 HeapWord* const new_top = _space_info[id].new_top();
1151 const size_t space_live = pointer_delta(new_top, space->bottom());
1152 const size_t dead_to_left = pointer_delta(addr, cp->destination());
1153 const size_t space_cap = space->capacity_in_words();
1154 const double dead_to_left_pct = double(dead_to_left) / space_cap;
1155 const size_t live_to_right = new_top - cp->destination();
1156 const size_t dead_to_right = space->top() - addr - live_to_right;
1158 tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1159 "spl=" SIZE_FORMAT " "
1160 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1161 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1162 " ratio=%10.8f",
1163 algorithm, addr, region_idx,
1164 space_live,
1165 dead_to_left, dead_to_left_pct,
1166 dead_to_right, live_to_right,
1167 double(dead_to_right) / live_to_right);
1168 }
1169 #endif // #ifndef PRODUCT
1171 // Return a fraction indicating how much of the generation can be treated as
1172 // "dead wood" (i.e., not reclaimed). The function uses a normal distribution
1173 // based on the density of live objects in the generation to determine a limit,
1174 // which is then adjusted so the return value is min_percent when the density is
1175 // 1.
1176 //
1177 // The following table shows some return values for different values of the
1178 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1179 // min_percent is 1.
1180 //
1181 // fraction allowed as dead wood
1182 // -----------------------------------------------------------------
1183 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1184 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1185 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1186 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1187 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1188 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1189 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1190 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1191 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1192 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1193 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1194 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1195 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1196 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1197 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1198 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1199 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1200 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1201 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1202 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1203 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1204 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1205 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
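//
// Restated as a formula (a sketch consistent with initialize_dead_wood_limiter()
// and dead_wood_limiter() below, assuming normal_distribution(x) is the Gaussian
// density built from _dwl_first_term, with mu = ParallelOldDeadWoodLimiterMean / 100
// and sigma = ParallelOldDeadWoodLimiterStdDev / 100):
//
//   N(x)  = 1 / (sqrt(2 * pi) * sigma) * exp(-0.5 * ((x - mu) / sigma)^2)
//   limit = max(N(density) - N(1.0) + min_percent / 100, 0)
//
// For example, with the table's settings (mu == 0.5, sigma == 0.80,
// min_percent == 1) and density == 0.5: N(0.5) ~= 0.4987, N(1.0) ~= 0.4102,
// so limit ~= 0.0985, matching the 0.09847664 entry above.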
1207 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1208 {
1209 assert(_dwl_initialized, "uninitialized");
1211 // The raw limit is the value of the normal distribution at x = density.
1212 const double raw_limit = normal_distribution(density);
1214 // Adjust the raw limit so it becomes the minimum when the density is 1.
1215 //
1216 // First subtract the adjustment value (which is simply the precomputed value
1217 // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1218 // Then add the minimum value, so the minimum is returned when the density is
1219 // 1. Finally, prevent negative values, which occur when the mean is not 0.5.
1220 const double min = double(min_percent) / 100.0;
1221 const double limit = raw_limit - _dwl_adjustment + min;
1222 return MAX2(limit, 0.0);
1223 }
1225 ParallelCompactData::RegionData*
1226 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1227 const RegionData* end)
1228 {
1229 const size_t region_size = ParallelCompactData::RegionSize;
1230 ParallelCompactData& sd = summary_data();
1231 size_t left = sd.region(beg);
1232 size_t right = end > beg ? sd.region(end) - 1 : left;
1234 // Binary search.
1235 while (left < right) {
1236 // Equivalent to (left + right) / 2, but does not overflow.
1237 const size_t middle = left + (right - left) / 2;
1238 RegionData* const middle_ptr = sd.region(middle);
1239 HeapWord* const dest = middle_ptr->destination();
1240 HeapWord* const addr = sd.region_to_addr(middle);
1241 assert(dest != NULL, "sanity");
1242 assert(dest <= addr, "must move left");
1244 if (middle > left && dest < addr) {
1245 right = middle - 1;
1246 } else if (middle < right && middle_ptr->data_size() == region_size) {
1247 left = middle + 1;
1248 } else {
1249 return middle_ptr;
1250 }
1251 }
1252 return sd.region(left);
1253 }
1255 ParallelCompactData::RegionData*
1256 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1257 const RegionData* end,
1258 size_t dead_words)
1259 {
1260 ParallelCompactData& sd = summary_data();
1261 size_t left = sd.region(beg);
1262 size_t right = end > beg ? sd.region(end) - 1 : left;
1264 // Binary search.
1265 while (left < right) {
1266 // Equivalent to (left + right) / 2, but does not overflow.
1267 const size_t middle = left + (right - left) / 2;
1268 RegionData* const middle_ptr = sd.region(middle);
1269 HeapWord* const dest = middle_ptr->destination();
1270 HeapWord* const addr = sd.region_to_addr(middle);
1271 assert(dest != NULL, "sanity");
1272 assert(dest <= addr, "must move left");
1274 const size_t dead_to_left = pointer_delta(addr, dest);
1275 if (middle > left && dead_to_left > dead_words) {
1276 right = middle - 1;
1277 } else if (middle < right && dead_to_left < dead_words) {
1278 left = middle + 1;
1279 } else {
1280 return middle_ptr;
1281 }
1282 }
1283 return sd.region(left);
1284 }
1286 // The result is valid during the summary phase, after the initial summarization
1287 // of each space into itself, and before final summarization.
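// A sample evaluation (hypothetical numbers): with dense_prefix_live == 1000
// words, compacted_region_live == 2000 and compacted_region_used == 3000,
// reclaimable == 1000 and the ratio is 1000 / (1000 + 1.25 * 2000) ~= 0.29.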
1288 inline double
1289 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1290 HeapWord* const bottom,
1291 HeapWord* const top,
1292 HeapWord* const new_top)
1293 {
1294 ParallelCompactData& sd = summary_data();
1296 assert(cp != NULL, "sanity");
1297 assert(bottom != NULL, "sanity");
1298 assert(top != NULL, "sanity");
1299 assert(new_top != NULL, "sanity");
1300 assert(top >= new_top, "summary data problem?");
1301 assert(new_top > bottom, "space is empty; should not be here");
1302 assert(new_top >= cp->destination(), "sanity");
1303 assert(top >= sd.region_to_addr(cp), "sanity");
1305 HeapWord* const destination = cp->destination();
1306 const size_t dense_prefix_live = pointer_delta(destination, bottom);
1307 const size_t compacted_region_live = pointer_delta(new_top, destination);
1308 const size_t compacted_region_used = pointer_delta(top,
1309 sd.region_to_addr(cp));
1310 const size_t reclaimable = compacted_region_used - compacted_region_live;
1312 const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1313 return double(reclaimable) / divisor;
1314 }
1316 // Return the address of the end of the dense prefix, a.k.a. the start of the
1317 // compacted region. The address is always on a region boundary.
1318 //
1319 // Completely full regions at the left are skipped, since no compaction can
1320 // occur in those regions. Then the maximum amount of dead wood to allow is
1321 // computed, based on the density (amount live / capacity) of the generation;
1322 // the region with approximately that amount of dead space to the left is
1323 // identified as the limit region. Regions between the last completely full
1324 // region and the limit region are scanned and the one that has the best
1325 // (maximum) reclaimed_ratio() is selected.
1326 HeapWord*
1327 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1328 bool maximum_compaction)
1329 {
1330 const size_t region_size = ParallelCompactData::RegionSize;
1331 const ParallelCompactData& sd = summary_data();
1333 const MutableSpace* const space = _space_info[id].space();
1334 HeapWord* const top = space->top();
1335 HeapWord* const top_aligned_up = sd.region_align_up(top);
1336 HeapWord* const new_top = _space_info[id].new_top();
1337 HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1338 HeapWord* const bottom = space->bottom();
1339 const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1340 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1341 const RegionData* const new_top_cp =
1342 sd.addr_to_region_ptr(new_top_aligned_up);
1344 // Skip full regions at the beginning of the space--they are necessarily part
1345 // of the dense prefix.
1346 const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1347 assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1348 space->is_empty(), "no dead space allowed to the left");
1349 assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1350 "region must have dead space");
1352 // The gc number is saved whenever a maximum compaction is done, and used to
1353 // determine when the maximum compaction interval has expired. This avoids
1354 // successive max compactions for different reasons.
1355 assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1356 const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1357 const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1358 total_invocations() == HeapFirstMaximumCompactionCount;
1359 if (maximum_compaction || full_cp == top_cp || interval_ended) {
1360 _maximum_compaction_gc_num = total_invocations();
1361 return sd.region_to_addr(full_cp);
1362 }
1364 const size_t space_live = pointer_delta(new_top, bottom);
1365 const size_t space_used = space->used_in_words();
1366 const size_t space_capacity = space->capacity_in_words();
1368 const double density = double(space_live) / double(space_capacity);
1369 const size_t min_percent_free =
1370 id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
1371 const double limiter = dead_wood_limiter(density, min_percent_free);
1372 const size_t dead_wood_max = space_used - space_live;
1373 const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1374 dead_wood_max);
1376 if (TraceParallelOldGCDensePrefix) {
1377 tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1378 "space_cap=" SIZE_FORMAT,
1379 space_live, space_used,
1380 space_capacity);
1381 tty->print_cr("dead_wood_limiter(%6.4f, %d)=%6.4f "
1382 "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1383 density, min_percent_free, limiter,
1384 dead_wood_max, dead_wood_limit);
1385 }
1387 // Locate the region with the desired amount of dead space to the left.
1388 const RegionData* const limit_cp =
1389 dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1391 // Scan from the first region with dead space to the limit region and find the
1392 // one with the best (largest) reclaimed ratio.
1393 double best_ratio = 0.0;
1394 const RegionData* best_cp = full_cp;
1395 for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1396 double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1397 if (tmp_ratio > best_ratio) {
1398 best_cp = cp;
1399 best_ratio = tmp_ratio;
1400 }
1401 }
1403 #if 0
1404 // Something to consider: if the region with the best ratio is 'close to' the
1405 // first region w/free space, choose the first region with free space
1406 // ("first-free"). The first-free region is usually near the start of the
1407 // heap, which means we are copying most of the heap already, so copy a bit
1408 // more to get complete compaction.
1409 if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
1410 _maximum_compaction_gc_num = total_invocations();
1411 best_cp = full_cp;
1412 }
1413 #endif // #if 0
1415 return sd.region_to_addr(best_cp);
1416 }
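// Summarize each space into itself as if it were fully compacted to its bottom
// (no dense prefix), recording a tentative new_top for each space.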
1418 void PSParallelCompact::summarize_spaces_quick()
1419 {
1420 for (unsigned int i = 0; i < last_space_id; ++i) {
1421 const MutableSpace* space = _space_info[i].space();
1422 HeapWord** nta = _space_info[i].new_top_addr();
1423 bool result = _summary_data.summarize(_space_info[i].split_info(),
1424 space->bottom(), space->top(), NULL,
1425 space->bottom(), space->end(), nta);
1426 assert(result, "space must fit into itself");
1427 _space_info[i].set_dense_prefix(space->bottom());
1428 }
1429 }
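// If dead space crosses the end of the dense prefix, overwrite part of it with a
// filler object so that the copy/update phase is never left with a fragment of
// dead space too small to fill; see the case analysis below.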
1431 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1432 {
1433 HeapWord* const dense_prefix_end = dense_prefix(id);
1434 const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1435 const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1436 if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1437 // Only enough dead space is filled so that any remaining dead space to the
1438 // left is larger than the minimum filler object. (The remainder is filled
1439 // during the copy/update phase.)
1440 //
1441 // The size of the dead space to the right of the boundary is not a
1442 // concern, since compaction will be able to use whatever space is
1443 // available.
1444 //
1445 // Here '||' is the boundary, 'x' represents a don't care bit and a box
1446 // surrounds the space to be filled with an object.
1447 //
1448 // In the 32-bit VM, each bit represents two 32-bit words:
1449 //                         +---+
1450 // a) beg_bits: ... x x x | 0 | || 0 x x ...
1451 //    end_bits: ... x x x | 0 | || 0 x x ...
1452 //                         +---+
1453 //
1454 // In the 64-bit VM, each bit represents one 64-bit word:
1455 //                         +--------+
1456 // b) beg_bits: ... x x x | 0 || 0 | x x ...
1457 //    end_bits: ... x x 1 | 0 || 0 | x x ...
1458 //                         +--------+
1459 //                     +-----+
1460 // c) beg_bits: ... x x | 0 0 | || 0 x x ...
1461 //    end_bits: ... x 1 | 0 0 | || 0 x x ...
1462 //                     +-----+
1463 //                   +-------+
1464 // d) beg_bits: ... x | 0 0 0 | || 0 x x ...
1465 //    end_bits: ... 1 | 0 0 0 | || 0 x x ...
1466 //                   +-------+
1467 //                     +-----+
1468 // e) beg_bits: ... 0 0 | 0 0 | || 0 x x ...
1469 //    end_bits: ... 0 0 | 0 0 | || 0 x x ...
1470 //                     +-----+
1472 // Initially assume case a, c or e will apply.
1473 size_t obj_len = (size_t)oopDesc::header_size();
1474 HeapWord* obj_beg = dense_prefix_end - obj_len;
1476 #ifdef _LP64
1477 if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1478 // Case b above.
1479 obj_beg = dense_prefix_end - 1;
1480 } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1481 _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1482 // Case d above.
1483 obj_beg = dense_prefix_end - 3;
1484 obj_len = 3;
1485 }
1486 #endif // #ifdef _LP64
1488 CollectedHeap::fill_with_object(obj_beg, obj_len);
1489 _mark_bitmap.mark_obj(obj_beg, obj_len);
1490 _summary_data.add_obj(obj_beg, obj_len);
1491 assert(start_array(id) != NULL, "sanity");
1492 start_array(id)->allocate_block(obj_beg);
1493 }
1494 }
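// Clear the source_region field of every region in [beg_addr, end_addr), with
// end_addr aligned up to a region boundary.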
1496 void
1497 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1498 {
1499 RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1500 HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1501 RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1502 for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1503 cur->set_source_region(0);
1504 }
1505 }
1507 void
1508 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1509 {
1510 assert(id < last_space_id, "id out of range");
1511 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1512 "should have been set in summarize_spaces_quick()");
1514 const MutableSpace* space = _space_info[id].space();
1515 if (_space_info[id].new_top() != space->bottom()) {
1516 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1517 _space_info[id].set_dense_prefix(dense_prefix_end);
1519 #ifndef PRODUCT
1520 if (TraceParallelOldGCDensePrefix) {
1521 print_dense_prefix_stats("ratio", id, maximum_compaction,
1522 dense_prefix_end);
1523 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1524 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1525 }
1526 #endif // #ifndef PRODUCT
1528 // Recompute the summary data, taking into account the dense prefix. If every
1529 // last byte will be reclaimed, then the existing summary data which compacts
1530 // everything can be left in place.
1531 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1532 // If dead space crosses the dense prefix boundary, it is (at least
1533 // partially) filled with a dummy object, marked live and added to the
1534 // summary data. This simplifies the copy/update phase and must be done
1535 // before the final locations of objects are determined, to prevent leaving
1536 // a fragment of dead space that is too small to fill with an object.
1537 fill_dense_prefix_end(id);
1539 // Compute the destination of each Region, and thus each object.
1540 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1541 _summary_data.summarize(_space_info[id].split_info(),
1542 dense_prefix_end, space->top(), NULL,
1543 dense_prefix_end, space->end(),
1544 _space_info[id].new_top_addr());
1545 }
1546 }
1548 if (TraceParallelOldGCSummaryPhase) {
1549 const size_t region_size = ParallelCompactData::RegionSize;
1550 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1551 const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1552 const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1553 HeapWord* const new_top = _space_info[id].new_top();
1554 const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1555 const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1556 tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1557 "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1558 "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1559 id, space->capacity_in_words(), dense_prefix_end,
1560 dp_region, dp_words / region_size,
1561 cr_words / region_size, new_top);
1562 }
1563 }
1565 #ifndef PRODUCT
1566 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1567 HeapWord* dst_beg, HeapWord* dst_end,
1568 SpaceId src_space_id,
1569 HeapWord* src_beg, HeapWord* src_end)
1570 {
1571 if (TraceParallelOldGCSummaryPhase) {
1572 tty->print_cr("summarizing %d [%s] into %d [%s]: "
1573 "src=" PTR_FORMAT "-" PTR_FORMAT " "
1574 SIZE_FORMAT "-" SIZE_FORMAT " "
1575 "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1576 SIZE_FORMAT "-" SIZE_FORMAT,
1577 src_space_id, space_names[src_space_id],
1578 dst_space_id, space_names[dst_space_id],
1579 src_beg, src_end,
1580 _summary_data.addr_to_region_idx(src_beg),
1581 _summary_data.addr_to_region_idx(src_end),
1582 dst_beg, dst_end,
1583 _summary_data.addr_to_region_idx(dst_beg),
1584 _summary_data.addr_to_region_idx(dst_end));
1585 }
1586 }
1587 #endif // #ifndef PRODUCT
1589 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1590 bool maximum_compaction)
1591 {
1592 EventMark m("2 summarize");
1593 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1594 // trace("2");
1596 #ifdef ASSERT
1597 if (TraceParallelOldGCMarkingPhase) {
1598 tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1599 "add_obj_bytes=" SIZE_FORMAT,
1600 add_obj_count, add_obj_size * HeapWordSize);
1601 tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1602 "mark_bitmap_bytes=" SIZE_FORMAT,
1603 mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1604 }
1605 #endif // #ifdef ASSERT
1607 // Quick summarization of each space into itself, to see how much is live.
1608 summarize_spaces_quick();
1610 if (TraceParallelOldGCSummaryPhase) {
1611 tty->print_cr("summary_phase: after summarizing each space to self");
1612 Universe::print();
1613 NOT_PRODUCT(print_region_ranges());
1614 if (Verbose) {
1615 NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1616 }
1617 }
1619 // The amount of live data that will end up in old space (assuming it fits).
1620 size_t old_space_total_live = 0;
1621 assert(perm_space_id < old_space_id, "should not count perm data here");
1622 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1623 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1624 _space_info[id].space()->bottom());
1625 }
1627 MutableSpace* const old_space = _space_info[old_space_id].space();
1628 if (old_space_total_live > old_space->capacity_in_words()) {
1629 // XXX - should also try to expand
1630 maximum_compaction = true;
1631 }
1633 // Permanent and Old generations.
1634 summarize_space(perm_space_id, maximum_compaction);
1635 summarize_space(old_space_id, maximum_compaction);
1637 // Summarize the remaining spaces in the young gen. The initial target space
1638 // is the old gen. If a space does not fit entirely into the target, then the
1639 // remainder is compacted into the space itself and that space becomes the new
1640 // target.
1641 SpaceId dst_space_id = old_space_id;
1642 HeapWord* dst_space_end = old_space->end();
1643 HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1644 for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1645 const MutableSpace* space = _space_info[id].space();
1646 const size_t live = pointer_delta(_space_info[id].new_top(),
1647 space->bottom());
1648 const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1650 NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1651 SpaceId(id), space->bottom(), space->top());)
1652 if (live > 0 && live <= available) {
1653 // All the live data will fit.
1654 bool done = _summary_data.summarize(_space_info[id].split_info(),
1655 space->bottom(), space->top(),
1656 NULL,
1657 *new_top_addr, dst_space_end,
1658 new_top_addr);
1659 assert(done, "space must fit into old gen");
1661 // XXX - this is necessary because decrement_destination_counts() tests
1662 // source_region() to determine if a region will be filled. Probably
1663 // better to pass src_space->new_top() into decrement_destination_counts
1664 // and test that instead.
1665 //
1666 // Clear the source_region field for each region in the space.
1667 clear_source_region(space->bottom(), _space_info[id].new_top());
1669 // Reset the new_top value for the space.
1670 _space_info[id].set_new_top(space->bottom());
1671 } else if (live > 0) {
1672 // Attempt to fit part of the source space into the target space.
1673 HeapWord* next_src_addr = NULL;
1674 bool done = _summary_data.summarize(_space_info[id].split_info(),
1675 space->bottom(), space->top(),
1676 &next_src_addr,
1677 *new_top_addr, dst_space_end,
1678 new_top_addr);
1679 assert(!done, "space should not fit into old gen");
1680 assert(next_src_addr != NULL, "sanity");
1682 // The source space becomes the new target, so the remainder is compacted
1683 // within the space itself.
1684 dst_space_id = SpaceId(id);
1685 dst_space_end = space->end();
1686 new_top_addr = _space_info[id].new_top_addr();
1687 HeapWord* const clear_end = _space_info[id].new_top();
1688 NOT_PRODUCT(summary_phase_msg(dst_space_id,
1689 space->bottom(), dst_space_end,
1690 SpaceId(id), next_src_addr, space->top());)
1691 done = _summary_data.summarize(_space_info[id].split_info(),
1692 next_src_addr, space->top(),
1693 NULL,
1694 space->bottom(), dst_space_end,
1695 new_top_addr);
1696 assert(done, "space must fit when compacted into itself");
1697 assert(*new_top_addr <= space->top(), "usage should not grow");
1699 // XXX - this should go away. See comments above.
1700 //
1701 // Clear the source_region field in regions at the end of the space that
1702 // will not be filled.
1703 HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
1704 clear_source_region(clear_beg, clear_end);
1705 }
1706 }
1708 if (TraceParallelOldGCSummaryPhase) {
1709 tty->print_cr("summary_phase: after final summarization");
1710 Universe::print();
1711 NOT_PRODUCT(print_region_ranges());
1712 if (Verbose) {
1713 NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
1714 }
1715 }
1716 }
1718 // This method should contain all heap-specific policy for invoking a full
1719 // collection. invoke_no_policy() will only attempt to compact the heap; it
1720 // will do nothing further. Any policy-based bail out, scavenge before full gc,
1721 // or other specialized behavior needs to be added here.
1722 //
1723 // Note that this method should only be called from the vm_thread while at a
1724 // safepoint.
1725 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1726 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1727 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1728 "should be in vm thread");
1729 ParallelScavengeHeap* heap = gc_heap();
1730 GCCause::Cause gc_cause = heap->gc_cause();
1731 assert(!heap->is_gc_active(), "not reentrant");
1733 PSAdaptiveSizePolicy* policy = heap->size_policy();
1735 // Before each allocation/collection attempt, find out from the
1736 // policy object if GCs are, on the whole, taking too long. If so,
1737 // bail out without attempting a collection. The exceptions are
1738 // for explicitly requested GCs.
1739 if (!policy->gc_time_limit_exceeded() ||
1740 GCCause::is_user_requested_gc(gc_cause) ||
1741 GCCause::is_serviceability_requested_gc(gc_cause)) {
1742 IsGCActiveMark mark;
1744 if (ScavengeBeforeFullGC) {
1745 PSScavenge::invoke_no_policy();
1746 }
1748 PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
1749 }
1750 }
1752 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
1753 size_t addr_region_index = addr_to_region_idx(addr);
1754 return region_index == addr_region_index;
1755 }
1757 // This method contains no policy. You should probably
1758 // be calling invoke() instead.
1759 void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1760 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1761 assert(ref_processor() != NULL, "Sanity");
1763 if (GC_locker::check_active_before_gc()) {
1764 return;
1765 }
1767 TimeStamp marking_start;
1768 TimeStamp compaction_start;
1769 TimeStamp collection_exit;
1771 ParallelScavengeHeap* heap = gc_heap();
1772 GCCause::Cause gc_cause = heap->gc_cause();
1773 PSYoungGen* young_gen = heap->young_gen();
1774 PSOldGen* old_gen = heap->old_gen();
1775 PSPermGen* perm_gen = heap->perm_gen();
1776 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1778 if (ZapUnusedHeapArea) {
1779 // Save information needed to minimize mangling
1780 heap->record_gen_tops_before_GC();
1781 }
1783 _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
1785 // Make sure data structures are sane, make the heap parsable, and do other
1786 // miscellaneous bookkeeping.
1787 PreGCValues pre_gc_values;
1788 pre_compact(&pre_gc_values);
1790 // Get the compaction manager reserved for the VM thread.
1791 ParCompactionManager* const vmthread_cm =
1792 ParCompactionManager::manager_array(gc_task_manager()->workers());
1794 // Placed after pre_compact(), where the number of invocations is incremented.
1795 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
1797 {
1798 ResourceMark rm;
1799 HandleMark hm;
1801 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
1803 // This is useful for debugging but don't change the output
1804 // the customer sees.
1805 const char* gc_cause_str = "Full GC";
1806 if (is_system_gc && PrintGCDetails) {
1807 gc_cause_str = "Full GC (System)";
1808 }
1809 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
1810 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
1811 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
1812 TraceCollectorStats tcs(counters());
1813 TraceMemoryManagerStats tms(true /* Full GC */);
1815 if (TraceGen1Time) accumulated_time()->start();
1817 // Let the size policy know we're starting
1818 size_policy->major_collection_begin();
1820 // When collecting the permanent generation methodOops may be moving,
1821 // so we either have to flush all bcp data or convert it into bci.
1822 CodeCache::gc_prologue();
1823 Threads::gc_prologue();
1825 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1826 COMPILER2_PRESENT(DerivedPointerTable::clear());
1828 ref_processor()->enable_discovery();
1829 ref_processor()->setup_policy(maximum_heap_compaction);
1831 bool marked_for_unloading = false;
1833 marking_start.update();
1834 marking_phase(vmthread_cm, maximum_heap_compaction);
1836 #ifndef PRODUCT
1837 if (TraceParallelOldGCMarkingPhase) {
1838 gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
1839 "cas_by_another %d",
1840 mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
1841 mark_bitmap()->cas_by_another());
1842 }
1843 #endif // #ifndef PRODUCT
1845 bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
1846 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1848 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
1849 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
1851 // adjust_roots() updates Universe::_intArrayKlassObj which is
1852 // needed by the compaction for filling holes in the dense prefix.
1853 adjust_roots();
1855 compaction_start.update();
1856 // Does the perm gen always have to be done serially because
1857 // klasses are used in the update of an object?
1858 compact_perm(vmthread_cm);
1860 if (UseParallelOldGCCompacting) {
1861 compact();
1862 } else {
1863 compact_serial(vmthread_cm);
1864 }
1866 // Reset the mark bitmap and summary data, and do other bookkeeping. Must be
1867 // done before resizing.
1868 post_compact();
1870 // Let the size policy know we're done
1871 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1873 if (UseAdaptiveSizePolicy) {
1874 if (PrintAdaptiveSizePolicy) {
1875 gclog_or_tty->print("AdaptiveSizeStart: ");
1876 gclog_or_tty->stamp();
1877 gclog_or_tty->print_cr(" collection: %d ",
1878 heap->total_collections());
1879 if (Verbose) {
1880 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
1881 " perm_gen_capacity: %d ",
1882 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
1883 perm_gen->capacity_in_bytes());
1884 }
1885 }
1887 // Don't check if the size_policy is ready here. Let
1888 // the size_policy check that internally.
1889 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1890 ((gc_cause != GCCause::_java_lang_system_gc) ||
1891 UseAdaptiveSizePolicyWithSystemGC)) {
1892 // Calculate optimal free space amounts
1893 assert(young_gen->max_size() >
1894 young_gen->from_space()->capacity_in_bytes() +
1895 young_gen->to_space()->capacity_in_bytes(),
1896 "Sizes of space in young gen are out-of-bounds");
1897 size_t max_eden_size = young_gen->max_size() -
1898 young_gen->from_space()->capacity_in_bytes() -
1899 young_gen->to_space()->capacity_in_bytes();
1900 size_policy->compute_generation_free_space(
1901 young_gen->used_in_bytes(),
1902 young_gen->eden_space()->used_in_bytes(),
1903 old_gen->used_in_bytes(),
1904 perm_gen->used_in_bytes(),
1905 young_gen->eden_space()->capacity_in_bytes(),
1906 old_gen->max_gen_size(),
1907 max_eden_size,
1908 true /* full gc*/,
1909 gc_cause);
1911 heap->resize_old_gen(
1912 size_policy->calculated_old_free_size_in_bytes());
1914 // Don't resize the young generation at a major collection. A
1915 // desired young generation size may have been calculated but
1916 // resizing the young generation complicates the code because the
1917 // resizing of the old generation may have moved the boundary
1918 // between the young generation and the old generation. Let the
1919 // young generation resizing happen at the minor collections.
1920 }
1921 if (PrintAdaptiveSizePolicy) {
1922 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
1923 heap->total_collections());
1924 }
1925 }
1927 if (UsePerfData) {
1928 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1929 counters->update_counters();
1930 counters->update_old_capacity(old_gen->capacity_in_bytes());
1931 counters->update_young_capacity(young_gen->capacity_in_bytes());
1932 }
1934 heap->resize_all_tlabs();
1936 // We collected the perm gen, so we'll resize it here.
1937 perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
1939 if (TraceGen1Time) accumulated_time()->stop();
1941 if (PrintGC) {
1942 if (PrintGCDetails) {
1943 // No GC timestamp here. This is after GC so it would be confusing.
1944 young_gen->print_used_change(pre_gc_values.young_gen_used());
1945 old_gen->print_used_change(pre_gc_values.old_gen_used());
1946 heap->print_heap_change(pre_gc_values.heap_used());
1947 // Print perm gen last (print_heap_change() excludes the perm gen).
1948 perm_gen->print_used_change(pre_gc_values.perm_gen_used());
1949 } else {
1950 heap->print_heap_change(pre_gc_values.heap_used());
1951 }
1952 }
1954 // Track memory usage and detect low memory
1955 MemoryService::track_memory_usage();
1956 heap->update_counters();
1958 if (PrintGCDetails) {
1959 if (size_policy->print_gc_time_limit_would_be_exceeded()) {
1960 if (size_policy->gc_time_limit_exceeded()) {
1961 gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
1962 "of %d%%", GCTimeLimit);
1963 } else {
1964 gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
1965 "of %d%%", GCTimeLimit);
1966 }
1967 }
1968 size_policy->set_print_gc_time_limit_would_be_exceeded(false);
1969 }
1970 }
1972 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1973 HandleMark hm; // Discard invalid handles created during verification
1974 gclog_or_tty->print(" VerifyAfterGC:");
1975 Universe::verify(false);
1976 }
1978 // Re-verify object start arrays
1979 if (VerifyObjectStartArray &&
1980 VerifyAfterGC) {
1981 old_gen->verify_object_start_array();
1982 perm_gen->verify_object_start_array();
1983 }
1985 if (ZapUnusedHeapArea) {
1986 old_gen->object_space()->check_mangled_unused_area_complete();
1987 perm_gen->object_space()->check_mangled_unused_area_complete();
1988 }
1990 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1992 collection_exit.update();
1994 if (PrintHeapAtGC) {
1995 Universe::print_heap_after_gc();
1996 }
1997 if (PrintGCTaskTimeStamps) {
1998 gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
1999 INT64_FORMAT,
2000 marking_start.ticks(), compaction_start.ticks(),
2001 collection_exit.ticks());
2002 gc_task_manager()->print_task_time_stamps();
2003 }
2004 }
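// Attempt to move the boundary between the young and old generations up so the
// live data in eden is absorbed into the old gen in place, avoiding a copy.
// Returns false if the adaptive boundary is not in use or the resize would
// violate generation sizing constraints.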
2006 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
2007 PSYoungGen* young_gen,
2008 PSOldGen* old_gen) {
2009 MutableSpace* const eden_space = young_gen->eden_space();
2010 assert(!eden_space->is_empty(), "eden must be non-empty");
2011 assert(young_gen->virtual_space()->alignment() ==
2012 old_gen->virtual_space()->alignment(), "alignments do not match");
2014 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
2015 return false;
2016 }
2018 // Both generations must be completely committed.
2019 if (young_gen->virtual_space()->uncommitted_size() != 0) {
2020 return false;
2021 }
2022 if (old_gen->virtual_space()->uncommitted_size() != 0) {
2023 return false;
2024 }
2026 // Figure out how much to take from eden. Include the average amount promoted
2027 // in the total; otherwise the next young gen GC will simply bail out to a
2028 // full GC.
2029 const size_t alignment = old_gen->virtual_space()->alignment();
2030 const size_t eden_used = eden_space->used_in_bytes();
2031 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
2032 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
2033 const size_t eden_capacity = eden_space->capacity_in_bytes();
2035 if (absorb_size >= eden_capacity) {
2036 return false; // Must leave some space in eden.
2037 }
2039 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
2040 if (new_young_size < young_gen->min_gen_size()) {
2041 return false; // Respect young gen minimum size.
2042 }
2044 if (TraceAdaptiveGCBoundary && Verbose) {
2045 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
2046 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2047 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2048 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2049 absorb_size / K,
2050 eden_capacity / K, (eden_capacity - absorb_size) / K,
2051 young_gen->from_space()->used_in_bytes() / K,
2052 young_gen->to_space()->used_in_bytes() / K,
2053 young_gen->capacity_in_bytes() / K, new_young_size / K);
2054 }
2056 // Fill the unused part of the old gen.
2057 MutableSpace* const old_space = old_gen->object_space();
2058 HeapWord* const unused_start = old_space->top();
2059 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2061 if (unused_words > 0) {
2062 if (unused_words < CollectedHeap::min_fill_size()) {
2063 return false; // If the old gen cannot be filled, must give up.
2064 }
2065 CollectedHeap::fill_with_objects(unused_start, unused_words);
2066 }
2068 // Take the live data from eden and set both top and end in the old gen to
2069 // eden top. (Need to set end because reset_after_change() mangles the region
2070 // from end to virtual_space->high() in debug builds).
2071 HeapWord* const new_top = eden_space->top();
2072 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2073 absorb_size);
2074 young_gen->reset_after_change();
2075 old_space->set_top(new_top);
2076 old_space->set_end(new_top);
2077 old_gen->reset_after_change();
2079 // Update the object start array for the filler object and the data from eden.
2080 ObjectStartArray* const start_array = old_gen->start_array();
2081 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2082 start_array->allocate_block(p);
2083 }
2085 // Could update the promoted average here, but it is not typically updated at
2086 // full GCs and the value to use is unclear. Something like
2087 //
2088 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2090 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2091 return true;
2092 }
2094 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2095 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2096 "shouldn't return NULL");
2097 return ParallelScavengeHeap::gc_task_manager();
2098 }
2100 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2101 bool maximum_heap_compaction) {
2102 // Recursively traverse all live objects and mark them
2103 EventMark m("1 mark object");
2104 TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
2106 ParallelScavengeHeap* heap = gc_heap();
2107 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2108 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2109 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2111 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2112 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2114 {
2115 TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
2117 GCTaskQueue* q = GCTaskQueue::create();
2119 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2120 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2121 // We scan the thread roots in parallel
2122 Threads::create_thread_roots_marking_tasks(q);
2123 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2124 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2125 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2126 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2127 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2128 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
2130 if (parallel_gc_threads > 1) {
2131 for (uint j = 0; j < parallel_gc_threads; j++) {
2132 q->enqueue(new StealMarkingTask(&terminator));
2133 }
2134 }
2136 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2137 q->enqueue(fin);
2139 gc_task_manager()->add_list(q);
2141 fin->wait_for();
2143 // We have to release the barrier tasks!
2144 WaitForBarrierGCTask::destroy(fin);
2145 }
2147 // Process reference objects found during marking
2148 {
2149 TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
2150 if (ref_processor()->processing_is_mt()) {
2151 RefProcTaskExecutor task_executor;
2152 ref_processor()->process_discovered_references(
2153 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2154 &task_executor);
2155 } else {
2156 ref_processor()->process_discovered_references(
2157 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
2158 }
2159 }
2161 TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
2162 // Follow system dictionary roots and unload classes.
2163 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2165 // Follow code cache roots.
2166 CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
2167 purged_class);
2168 follow_stack(cm); // Flush marking stack.
2170 // Update subklass/sibling/implementor links of live klasses
2171 // revisit_klass_stack is used in follow_weak_klass_links().
2172 follow_weak_klass_links(cm);
2174 // Visit symbol and interned string tables and delete unmarked oops
2175 SymbolTable::unlink(is_alive_closure());
2176 StringTable::unlink(is_alive_closure());
2178 assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
2179 assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
2180 }
2182 // This should be moved to the shared markSweep code!
2183 class PSAlwaysTrueClosure: public BoolObjectClosure {
2184 public:
2185 void do_object(oop p) { ShouldNotReachHere(); }
2186 bool do_object_b(oop p) { return true; }
2187 };
2188 static PSAlwaysTrueClosure always_true;
2190 void PSParallelCompact::adjust_roots() {
2191 // Adjust the pointers to reflect the new locations
2192 EventMark m("3 adjust roots");
2193 TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
2195 // General strong roots.
2196 Universe::oops_do(adjust_root_pointer_closure());
2197 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
2198 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
2199 Threads::oops_do(adjust_root_pointer_closure());
2200 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
2201 FlatProfiler::oops_do(adjust_root_pointer_closure());
2202 Management::oops_do(adjust_root_pointer_closure());
2203 JvmtiExport::oops_do(adjust_root_pointer_closure());
2204 // SO_AllClasses
2205 SystemDictionary::oops_do(adjust_root_pointer_closure());
2206 vmSymbols::oops_do(adjust_root_pointer_closure());
2208 // Now adjust pointers in remaining weak roots. (All of which should
2209 // have been cleared if they pointed to non-surviving objects.)
2210 // Global (weak) JNI handles
2211 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
2213 CodeCache::oops_do(adjust_pointer_closure());
2214 SymbolTable::oops_do(adjust_root_pointer_closure());
2215 StringTable::oops_do(adjust_root_pointer_closure());
2216 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
2217 // Roots were visited so references into the young gen in roots
2218 // may have been scanned. Process them also.
2219 // Should the reference processor have a span that excludes
2220 // young gen objects?
2221 PSScavenge::reference_processor()->weak_oops_do(
2222 adjust_root_pointer_closure());
2223 }
2225 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
2226 EventMark m("4 compact perm");
2227 TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
2228 // trace("4");
2230 gc_heap()->perm_gen()->start_array()->reset();
2231 move_and_update(cm, perm_space_id);
2232 }
2234 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2235 uint parallel_gc_threads)
2236 {
2237 TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
2239 const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
2240 for (unsigned int j = 0; j < task_count; j++) {
2241 q->enqueue(new DrainStacksCompactionTask());
2242 }
2244 // Find all regions that are available (can be filled immediately) and
2245 // distribute them to the thread stacks. The iteration is done in reverse
2246 // order (high to low) so the regions will be removed in ascending order.
2248 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2250 size_t fillable_regions = 0; // A count for diagnostic purposes.
2251 unsigned int which = 0; // The worker thread number.
2253 for (unsigned int id = to_space_id; id > perm_space_id; --id) {
2254 SpaceInfo* const space_info = _space_info + id;
2255 MutableSpace* const space = space_info->space();
2256 HeapWord* const new_top = space_info->new_top();
2258 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2259 const size_t end_region =
2260 sd.addr_to_region_idx(sd.region_align_up(new_top));
2261 assert(end_region > 0, "perm gen cannot be empty");
2263 for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
2264 if (sd.region(cur)->claim_unsafe()) {
2265 ParCompactionManager* cm = ParCompactionManager::manager_array(which);
2266 cm->save_for_processing(cur);
2268 if (TraceParallelOldGCCompactionPhase && Verbose) {
2269 const size_t count_mod_8 = fillable_regions & 7;
2270 if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
2271 gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
2272 if (count_mod_8 == 7) gclog_or_tty->cr();
2273 }
2275 NOT_PRODUCT(++fillable_regions;)
2277 // Assign regions to threads in round-robin fashion.
2278 if (++which == task_count) {
2279 which = 0;
2280 }
2281 }
2282 }
2283 }
2285 if (TraceParallelOldGCCompactionPhase) {
2286 if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
2287 gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
2288 }
2289 }
2291 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2293 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2294 uint parallel_gc_threads) {
2295 TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
2297 ParallelCompactData& sd = PSParallelCompact::summary_data();
2299 // Iterate over all the spaces adding tasks for updating
2300 // regions in the dense prefix. Assume that 1 gc thread
2301 // will work on opening the gaps and the remaining gc threads
2302 // will work on the dense prefix.
2303 unsigned int space_id;
2304 for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2305 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2306 const MutableSpace* const space = _space_info[space_id].space();
2308 if (dense_prefix_end == space->bottom()) {
2309 // There is no dense prefix for this space.
2310 continue;
2311 }
2313 // The dense prefix is before this region.
2314 size_t region_index_end_dense_prefix =
2315 sd.addr_to_region_idx(dense_prefix_end);
2316 RegionData* const dense_prefix_cp =
2317 sd.region(region_index_end_dense_prefix);
2318 assert(dense_prefix_end == space->end() ||
2319 dense_prefix_cp->available() ||
2320 dense_prefix_cp->claimed(),
2321 "The region after the dense prefix should always be ready to fill");
2323 size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2325 // Is there dense prefix work?
2326 size_t total_dense_prefix_regions =
2327 region_index_end_dense_prefix - region_index_start;
2328 // How many regions of the dense prefix should be given to
2329 // each thread?
2330 if (total_dense_prefix_regions > 0) {
2331 uint tasks_for_dense_prefix = 1;
2332 if (UseParallelDensePrefixUpdate) {
2333 if (total_dense_prefix_regions <=
2334 (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2335 // Don't over partition. This assumes that
2336 // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2337 // so there are not many regions to process.
2338 tasks_for_dense_prefix = parallel_gc_threads;
2339 } else {
2340 // Over partition
2341 tasks_for_dense_prefix = parallel_gc_threads *
2342 PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2343 }
2344 }
2345 size_t regions_per_thread = total_dense_prefix_regions /
2346 tasks_for_dense_prefix;
2347 // Give each thread at least 1 region.
2348 if (regions_per_thread == 0) {
2349 regions_per_thread = 1;
2350 }
2352 for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2353 if (region_index_start >= region_index_end_dense_prefix) {
2354 break;
2355 }
2356 // region_index_end is not processed
2357 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2358 region_index_end_dense_prefix);
2359 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2360 region_index_start,
2361 region_index_end));
2362 region_index_start = region_index_end;
2363 }
2364 }
2365 // This gets any part of the dense prefix that did not
2366 // fit evenly.
2367 if (region_index_start < region_index_end_dense_prefix) {
2368 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2369 region_index_start,
2370 region_index_end_dense_prefix));
2371 }
2372 }
2373 }
2375 void PSParallelCompact::enqueue_region_stealing_tasks(
2376 GCTaskQueue* q,
2377 ParallelTaskTerminator* terminator_ptr,
2378 uint parallel_gc_threads) {
2379 TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
2381 // Once a thread has drained its stack, it should try to steal regions from
2382 // other threads.
2383 if (parallel_gc_threads > 1) {
2384 for (uint j = 0; j < parallel_gc_threads; j++) {
2385 q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2386 }
2387 }
2388 }
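// The compaction phase: reset the old gen start array, build the drain, dense
// prefix update and region stealing task queues, run them to completion, then
// apply any deferred object updates.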
2390 void PSParallelCompact::compact() {
2391 EventMark m("5 compact");
2392 // trace("5");
2393 TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
2395 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2396 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2397 PSOldGen* old_gen = heap->old_gen();
2398 old_gen->start_array()->reset();
2399 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2400 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2401 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
2403 GCTaskQueue* q = GCTaskQueue::create();
2404 enqueue_region_draining_tasks(q, parallel_gc_threads);
2405 enqueue_dense_prefix_tasks(q, parallel_gc_threads);
2406 enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
2408 {
2409 TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
2411 WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
2412 q->enqueue(fin);
2414 gc_task_manager()->add_list(q);
2416 fin->wait_for();
2418 // We have to release the barrier tasks!
2419 WaitForBarrierGCTask::destroy(fin);
2421 #ifdef ASSERT
2422 // Verify that all regions have been processed before the deferred updates.
2423 // Note that perm_space_id is skipped; this type of verification is not
2424 // valid until the perm gen is compacted by regions.
2425 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2426 verify_complete(SpaceId(id));
2427 }
2428 #endif
2429 }
2431 {
2432 // Update the deferred objects, if any. Any compaction manager can be used.
2433 TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
2434 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2435 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2436 update_deferred_objects(cm, SpaceId(id));
2437 }
2438 }
2439 }
2441 #ifdef ASSERT
2442 void PSParallelCompact::verify_complete(SpaceId space_id) {
2443 // All Regions between space bottom() and new_top() should be marked as filled
2444 // and all Regions between new_top() and top() should be available (i.e.,
2445 // should have been emptied).
2446 ParallelCompactData& sd = summary_data();
2447 SpaceInfo si = _space_info[space_id];
2448 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2449 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2450 const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2451 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2452 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2454 bool issued_a_warning = false;
2456 size_t cur_region;
2457 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2458 const RegionData* const c = sd.region(cur_region);
2459 if (!c->completed()) {
2460 warning("region " SIZE_FORMAT " not filled: "
2461 "destination_count=" SIZE_FORMAT,
2462 cur_region, c->destination_count());
2463 issued_a_warning = true;
2464 }
2465 }
2467 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2468 const RegionData* const c = sd.region(cur_region);
2469 if (!c->available()) {
2470 warning("region " SIZE_FORMAT " not empty: "
2471 "destination_count=" SIZE_FORMAT,
2472 cur_region, c->destination_count());
2473 issued_a_warning = true;
2474 }
2475 }
2477 if (issued_a_warning) {
2478 print_region_ranges();
2479 }
2480 }
2481 #endif // #ifdef ASSERT
2483 void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
2484 EventMark m("5 compact serial");
2485 TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
2487 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2488 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2490 PSYoungGen* young_gen = heap->young_gen();
2491 PSOldGen* old_gen = heap->old_gen();
2493 old_gen->start_array()->reset();
2494 old_gen->move_and_update(cm);
2495 young_gen->move_and_update(cm);
2496 }
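// Drain both marking stacks, following the contents of each object popped.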
2499 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
2500 while (!cm->overflow_stack()->is_empty()) {
2501 oop obj = cm->overflow_stack()->pop();
2502 obj->follow_contents(cm);
2503 }
2505 oop obj;
2506 // obj is a reference!!!
2507 while (cm->marking_stack()->pop_local(obj)) {
2508 // It would be nice to assert about the type of objects we might
2509 // pop, but they can come from anywhere, unfortunately.
2510 obj->follow_contents(cm);
2511 }
2512 }
2514 void
2515 PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
2516 // All klasses on the revisit stack are marked at this point.
2517 // Update and follow all subklass, sibling and implementor links.
2518 for (uint i = 0; i < ParallelGCThreads+1; i++) {
2519 ParCompactionManager* cm = ParCompactionManager::manager_array(i);
2520 KeepAliveClosure keep_alive_closure(cm);
2521 for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
2522 cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
2523 is_alive_closure(),
2524 &keep_alive_closure);
2525 }
2526 follow_stack(cm);
2527 }
2528 }
2530 void
2531 PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
2532 cm->revisit_klass_stack()->push(k);
2533 }
2535 #ifdef VALIDATE_MARK_SWEEP
2537 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
2538 if (!ValidateMarkSweep)
2539 return;
2541 if (!isroot) {
2542 if (_pointer_tracking) {
2543 guarantee(_adjusted_pointers->contains(p), "should have seen this pointer");
2544 _adjusted_pointers->remove(p);
2545 }
2546 } else {
2547 ptrdiff_t index = _root_refs_stack->find(p);
2548 if (index != -1) {
2549 int l = _root_refs_stack->length();
2550 if (l > 0 && l - 1 != index) {
2551 void* last = _root_refs_stack->pop();
2552 assert(last != p, "should be different");
2553 _root_refs_stack->at_put(index, last);
2554 } else {
2555 _root_refs_stack->remove(p);
2556 }
2557 }
2558 }
2559 }
2562 void PSParallelCompact::check_adjust_pointer(void* p) {
2563 _adjusted_pointers->push(p);
2564 }
2567 class AdjusterTracker: public OopClosure {
2568 public:
2569 AdjusterTracker() {};
2570 void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
2571 void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
2572 };
2575 void PSParallelCompact::track_interior_pointers(oop obj) {
2576 if (ValidateMarkSweep) {
2577 _adjusted_pointers->clear();
2578 _pointer_tracking = true;
2580 AdjusterTracker checker;
2581 obj->oop_iterate(&checker);
2582 }
2583 }
2586 void PSParallelCompact::check_interior_pointers() {
2587 if (ValidateMarkSweep) {
2588 _pointer_tracking = false;
2589 guarantee(_adjusted_pointers->length() == 0, "should have processed the same pointers");
2590 }
2591 }
2594 void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
2595 if (ValidateMarkSweep) {
2596 guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
2597 _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
2598 }
2599 }
2602 void PSParallelCompact::register_live_oop(oop p, size_t size) {
2603 if (ValidateMarkSweep) {
2604 _live_oops->push(p);
2605 _live_oops_size->push(size);
2606 _live_oops_index++;
2607 }
2608 }
2610 void PSParallelCompact::validate_live_oop(oop p, size_t size) {
2611 if (ValidateMarkSweep) {
2612 oop obj = _live_oops->at((int)_live_oops_index);
2613 guarantee(obj == p, "should be the same object");
2614 guarantee(_live_oops_size->at((int)_live_oops_index) == size, "should be the same size");
2615 _live_oops_index++;
2616 }
2617 }
2619 void PSParallelCompact::live_oop_moved_to(HeapWord* q, size_t size,
2620 HeapWord* compaction_top) {
2621 assert(oop(q)->forwardee() == NULL || oop(q)->forwardee() == oop(compaction_top),
2622 "should be moved to forwarded location");
2623 if (ValidateMarkSweep) {
2624 PSParallelCompact::validate_live_oop(oop(q), size);
2625 _live_oops_moved_to->push(oop(compaction_top));
2626 }
2627 if (RecordMarkSweepCompaction) {
2628 _cur_gc_live_oops->push(q);
2629 _cur_gc_live_oops_moved_to->push(compaction_top);
2630 _cur_gc_live_oops_size->push(size);
2631 }
2632 }
2635 void PSParallelCompact::compaction_complete() {
2636 if (RecordMarkSweepCompaction) {
2637 GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
2638 GrowableArray<HeapWord*>* _tmp_live_oops_moved_to = _cur_gc_live_oops_moved_to;
2639 GrowableArray<size_t> * _tmp_live_oops_size = _cur_gc_live_oops_size;
2641 _cur_gc_live_oops = _last_gc_live_oops;
2642 _cur_gc_live_oops_moved_to = _last_gc_live_oops_moved_to;
2643 _cur_gc_live_oops_size = _last_gc_live_oops_size;
2644 _last_gc_live_oops = _tmp_live_oops;
2645 _last_gc_live_oops_moved_to = _tmp_live_oops_moved_to;
2646 _last_gc_live_oops_size = _tmp_live_oops_size;
2647 }
2648 }
2651 void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
2652 if (!RecordMarkSweepCompaction) {
2653 tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
2654 return;
2655 }
2657 if (_last_gc_live_oops == NULL) {
2658 tty->print_cr("No compaction information gathered yet");
2659 return;
2660 }
2662 for (int i = 0; i < _last_gc_live_oops->length(); i++) {
2663 HeapWord* old_oop = _last_gc_live_oops->at(i);
2664 size_t sz = _last_gc_live_oops_size->at(i);
2665 if (old_oop <= q && q < (old_oop + sz)) {
2666 HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
2667 size_t offset = (q - old_oop);
2668 tty->print_cr("Address " PTR_FORMAT, q);
2669 tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
2670 tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
2671 return;
2672 }
2673 }
2675 tty->print_cr("Address " PTR_FORMAT " not found in live oop information from last GC", q);
2676 }
2677 #endif //VALIDATE_MARK_SWEEP
2679 // Update interior oops in the range of regions [beg_region, end_region).
2680 void
2681 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2682 SpaceId space_id,
2683 size_t beg_region,
2684 size_t end_region) {
2685 ParallelCompactData& sd = summary_data();
2686 ParMarkBitMap* const mbm = mark_bitmap();
2688 HeapWord* beg_addr = sd.region_to_addr(beg_region);
2689 HeapWord* const end_addr = sd.region_to_addr(end_region);
2690 assert(beg_region <= end_region, "bad region range");
2691 assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2693 #ifdef ASSERT
2694 // Claim the regions to avoid triggering an assert when they are marked as
2695 // filled.
2696 for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2697 assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2698 }
2699 #endif // #ifdef ASSERT
2701 if (beg_addr != space(space_id)->bottom()) {
2702 // Find the first live object or block of dead space that *starts* in this
2703 // range of regions. If a partial object crosses onto the region, skip it;
2704 // it will be marked for 'deferred update' when the object head is
2705 // processed. If dead space crosses onto the region, it is also skipped; it
2706 // will be filled when the prior region is processed. If neither of those
2707 // apply, the first word in the region is the start of a live object or dead
2708 // space.
2709 assert(beg_addr > space(space_id)->bottom(), "sanity");
2710 const RegionData* const cp = sd.region(beg_region);
2711 if (cp->partial_obj_size() != 0) {
2712 beg_addr = sd.partial_obj_end(beg_region);
2713 } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2714 beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2715 }
2716 }
2718 if (beg_addr < end_addr) {
2719 // A live object or block of dead space starts in this range of Regions.
2720 HeapWord* const dense_prefix_end = dense_prefix(space_id);
2722 // Create closures and iterate.
2723 UpdateOnlyClosure update_closure(mbm, cm, space_id);
2724 FillClosure fill_closure(cm, space_id);
2725 ParMarkBitMap::IterationStatus status;
2726 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2727 dense_prefix_end);
2728 if (status == ParMarkBitMap::incomplete) {
2729 update_closure.do_addr(update_closure.source());
2730 }
2731 }
2733 // Mark the regions as filled.
2734 RegionData* const beg_cp = sd.region(beg_region);
2735 RegionData* const end_cp = sd.region(end_region);
2736 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2737 cp->set_completed();
2738 }
2739 }
2741 // Return the SpaceId for the space containing addr. If addr is not in the
2742 // heap, last_space_id is returned. In debug mode it expects the address to be
2743 // in the heap and asserts such.
2744 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2745 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
2747 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
2748 if (_space_info[id].space()->contains(addr)) {
2749 return SpaceId(id);
2750 }
2751 }
2753 assert(false, "no space contains the addr");
2754 return last_space_id;
2755 }
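// Update interior oops in objects whose updates were deferred; each region in
// [dense_prefix, new_top) records at most one such address (deferred_obj_addr()),
// which is also added to the object start array.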
2757 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2758 SpaceId id) {
2759 assert(id < last_space_id, "bad space id");
2761 ParallelCompactData& sd = summary_data();
2762 const SpaceInfo* const space_info = _space_info + id;
2763 ObjectStartArray* const start_array = space_info->start_array();
2765 const MutableSpace* const space = space_info->space();
2766 assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2767 HeapWord* const beg_addr = space_info->dense_prefix();
2768 HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2770 const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2771 const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2772 const RegionData* cur_region;
2773 for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2774 HeapWord* const addr = cur_region->deferred_obj_addr();
2775 if (addr != NULL) {
2776 if (start_array != NULL) {
2777 start_array->allocate_block(addr);
2778 }
2779 oop(addr)->update_contents(cm);
2780 assert(oop(addr)->is_oop_or_null(), "should be an oop now");
2781 }
2782 }
2783 }
2785 // Skip over count live words starting from beg, and return the address of the
2786 // next live word. Unless marked, the word corresponding to beg is assumed to
2787 // be dead. Callers must either ensure beg does not correspond to the middle of
2788 // an object, or account for those live words in some other way. Callers must
2789 // also ensure that there are enough live words in the range [beg, end) to skip.
2790 HeapWord*
2791 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2792 {
2793 assert(count > 0, "sanity");
2795 ParMarkBitMap* m = mark_bitmap();
2796 idx_t bits_to_skip = m->words_to_bits(count);
2797 idx_t cur_beg = m->addr_to_bit(beg);
2798 const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2800 do {
2801 cur_beg = m->find_obj_beg(cur_beg, search_end);
2802 idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2803 const size_t obj_bits = cur_end - cur_beg + 1;
2804 if (obj_bits > bits_to_skip) {
2805 return m->bit_to_addr(cur_beg + bits_to_skip);
2806 }
2807 bits_to_skip -= obj_bits;
2808 cur_beg = cur_end + 1;
2809 } while (bits_to_skip > 0);
2811 // Skipping the desired number of words landed just past the end of an object.
2812 // Find the start of the next object.
2813 cur_beg = m->find_obj_beg(cur_beg, search_end);
2814 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2815 return m->bit_to_addr(cur_beg);
2816 }
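// Return the address of the first word in the source region that corresponds to
// dest_addr, taking into account a split point, a leading partial object, and
// any live words already copied to earlier destination regions.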
2818 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2819 SpaceId src_space_id,
2820 size_t src_region_idx)
2821 {
2822 assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2824 const SplitInfo& split_info = _space_info[src_space_id].split_info();
2825 if (split_info.dest_region_addr() == dest_addr) {
2826 // The partial object ending at the split point contains the first word to
2827 // be copied to dest_addr.
2828 return split_info.first_src_addr();
2829 }
2831 const ParallelCompactData& sd = summary_data();
2832 ParMarkBitMap* const bitmap = mark_bitmap();
2833 const size_t RegionSize = ParallelCompactData::RegionSize;
2835 assert(sd.is_region_aligned(dest_addr), "not aligned");
2836 const RegionData* const src_region_ptr = sd.region(src_region_idx);
2837 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2838 HeapWord* const src_region_destination = src_region_ptr->destination();
2840 assert(dest_addr >= src_region_destination, "wrong src region");
2841 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2843 HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2844 HeapWord* const src_region_end = src_region_beg + RegionSize;
2846 HeapWord* addr = src_region_beg;
2847 if (dest_addr == src_region_destination) {
2848 // Return the first live word in the source region.
2849 if (partial_obj_size == 0) {
2850 addr = bitmap->find_obj_beg(addr, src_region_end);
2851 assert(addr < src_region_end, "no objects start in src region");
2852 }
2853 return addr;
2854 }
2856 // Must skip some live data.
2857 size_t words_to_skip = dest_addr - src_region_destination;
2858 assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2860 if (partial_obj_size >= words_to_skip) {
2861 // All the live words to skip are part of the partial object.
2862 addr += words_to_skip;
2863 if (partial_obj_size == words_to_skip) {
2864 // Find the first live word past the partial object.
2865 addr = bitmap->find_obj_beg(addr, src_region_end);
2866 assert(addr < src_region_end, "wrong src region");
2867 }
2868 return addr;
2869 }
2871 // Skip over the partial object (if any).
2872 if (partial_obj_size != 0) {
2873 words_to_skip -= partial_obj_size;
2874 addr += partial_obj_size;
2875 }
2877 // Skip over live words due to objects that start in the region.
2878 addr = skip_live_words(addr, src_region_end, words_to_skip);
2879 assert(addr < src_region_end, "wrong src region");
2880 return addr;
2881 }
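// Decrement the destination count of each region in [beg_region, end_addr); a
// region that becomes available and can be claimed is saved for processing by
// this compaction manager.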
2883 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2884 size_t beg_region,
2885 HeapWord* end_addr)
2886 {
2887 ParallelCompactData& sd = summary_data();
2888 RegionData* const beg = sd.region(beg_region);
2889 HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
2890 RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
2891 size_t cur_idx = beg_region;
2892 for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
2893 assert(cur->data_size() > 0, "region must have live data");
2894 cur->decrement_destination_count();
2895 if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
2896 cm->save_for_processing(cur_idx);
2897 }
2898 }
2899 }
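// Find the next source region with live data. If the current source space is
// exhausted, advance src_space_id and src_space_top to the next space that does
// not compact into itself and update the closure's source address.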
2901 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2902 SpaceId& src_space_id,
2903 HeapWord*& src_space_top,
2904 HeapWord* end_addr)
2905 {
2906 typedef ParallelCompactData::RegionData RegionData;
2908 ParallelCompactData& sd = PSParallelCompact::summary_data();
2909 const size_t region_size = ParallelCompactData::RegionSize;
2911 size_t src_region_idx = 0;
2913 // Skip empty regions (if any) up to the top of the space.
2914 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2915 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2916 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2917 const RegionData* const top_region_ptr =
2918 sd.addr_to_region_ptr(top_aligned_up);
2919 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2920 ++src_region_ptr;
2921 }
2923 if (src_region_ptr < top_region_ptr) {
2924 // The next source region is in the current space. Update src_region_idx
2925 // and the source address to match src_region_ptr.
2926 src_region_idx = sd.region(src_region_ptr);
2927 HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2928 if (src_region_addr > closure.source()) {
2929 closure.set_source(src_region_addr);
2930 }
2931 return src_region_idx;
2932 }
2934 // Switch to a new source space and find the first non-empty region.
2935 unsigned int space_id = src_space_id + 1;
2936 assert(space_id < last_space_id, "not enough spaces");
2938 HeapWord* const destination = closure.destination();
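// Search the remaining spaces for the first region with live data.  Spaces
// that compact into themselves supply their own source regions and are
// skipped; in any other space, the first live data must be destined for the
// closure's current destination (see the asserts below).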
2940 do {
2941 MutableSpace* space = _space_info[space_id].space();
2942 HeapWord* const bottom = space->bottom();
2943 const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2945 // Iterate over the spaces that do not compact into themselves.
2946 if (bottom_cp->destination() != bottom) {
2947 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2948 const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2950 for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2951 if (src_cp->live_obj_size() > 0) {
2952 // Found it.
2953 assert(src_cp->destination() == destination,
2954 "first live obj in the space must match the destination");
2955 assert(src_cp->partial_obj_size() == 0,
2956 "a space cannot begin with a partial obj");
2958 src_space_id = SpaceId(space_id);
2959 src_space_top = space->top();
2960 const size_t src_region_idx = sd.region(src_cp);
2961 closure.set_source(sd.region_to_addr(src_region_idx));
2962 return src_region_idx;
2963 } else {
2964 assert(src_cp->data_size() == 0, "sanity");
2965 }
2966 }
2967 }
2968 } while (++space_id < last_space_id);
2970 assert(false, "no source region was found");
2971 return 0;
2972 }
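// Fill the destination region with index region_idx by copying live words
// from its source region(s), updating interior oops as each complete object
// is moved.  The source data may span multiple regions and multiple spaces.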
2974 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2975 {
2976 typedef ParMarkBitMap::IterationStatus IterationStatus;
2977 const size_t RegionSize = ParallelCompactData::RegionSize;
2978 ParMarkBitMap* const bitmap = mark_bitmap();
2979 ParallelCompactData& sd = summary_data();
2980 RegionData* const region_ptr = sd.region(region_idx);
2982 // Get the items needed to construct the closure.
2983 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2984 SpaceId dest_space_id = space_id(dest_addr);
2985 ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2986 HeapWord* new_top = _space_info[dest_space_id].new_top();
2987 assert(dest_addr < new_top, "sanity");
2988 const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2990 // Get the source region and related info.
2991 size_t src_region_idx = region_ptr->source_region();
2992 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2993 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2995 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2996 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2998 // Adjust src_region_idx to prepare for decrementing destination counts (the
2999 // destination count is not decremented when a region is copied to itself).
3000 if (src_region_idx == region_idx) {
3001 src_region_idx += 1;
3002 }
3004 if (bitmap->is_unmarked(closure.source())) {
3005 // The first source word is in the middle of an object; copy the remainder
3006 // of the object or as much as will fit. The fact that pointer updates were
3007 // deferred will be noted when the object header is processed.
3008 HeapWord* const old_src_addr = closure.source();
3009 closure.copy_partial_obj();
3010 if (closure.is_full()) {
3011 decrement_destination_counts(cm, src_region_idx, closure.source());
3012 region_ptr->set_deferred_obj_addr(NULL);
3013 region_ptr->set_completed();
3014 return;
3015 }
3017 HeapWord* const end_addr = sd.region_align_down(closure.source());
3018 if (sd.region_align_down(old_src_addr) != end_addr) {
3019 // The partial object was copied from more than one source region.
3020 decrement_destination_counts(cm, src_region_idx, end_addr);
3022 // Move to the next source region, possibly switching spaces as well. All
3023 // args except end_addr may be modified.
3024 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3025 end_addr);
3026 }
3027 }
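// Copy objects that start in the current source region, advancing to the
// next source region (and possibly the next space) as each is exhausted,
// until the destination region has been filled.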
3029 do {
3030 HeapWord* const cur_addr = closure.source();
3031 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
3032 src_space_top);
3033 IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
3035 if (status == ParMarkBitMap::incomplete) {
3036 // The last obj that starts in the source region does not end in the
3037 // region.
3038 assert(closure.source() < end_addr, "sanity");
3039 HeapWord* const obj_beg = closure.source();
3040 HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
3041 src_space_top);
3042 HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
3043 if (obj_end < range_end) {
3044 // The end was found; the entire object will fit.
3045 status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
3046 assert(status != ParMarkBitMap::would_overflow, "sanity");
3047 } else {
3048 // The end was not found; the object will not fit.
3049 assert(range_end < src_space_top, "obj cannot cross space boundary");
3050 status = ParMarkBitMap::would_overflow;
3051 }
3052 }
3054 if (status == ParMarkBitMap::would_overflow) {
3055 // The last object did not fit.  Record that its interior oop updates are
3056 // deferred, then copy enough of the object to fill the region.
3057 region_ptr->set_deferred_obj_addr(closure.destination());
3058 status = closure.copy_until_full(); // copies from closure.source()
3060 decrement_destination_counts(cm, src_region_idx, closure.source());
3061 region_ptr->set_completed();
3062 return;
3063 }
3065 if (status == ParMarkBitMap::full) {
3066 decrement_destination_counts(cm, src_region_idx, closure.source());
3067 region_ptr->set_deferred_obj_addr(NULL);
3068 region_ptr->set_completed();
3069 return;
3070 }
3072 decrement_destination_counts(cm, src_region_idx, end_addr);
3074 // Move to the next source region, possibly switching spaces as well. All
3075 // args except end_addr may be modified.
3076 src_region_idx = next_src_region(closure, src_space_id, src_space_top,
3077 end_addr);
3078 } while (true);
3079 }
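// Move and update the live objects in the given space:  update pointers in
// the dense prefix, then copy each remaining live object to its new location
// and update its interior oops.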
3081 void
3082 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
3083 const MutableSpace* sp = space(space_id);
3084 if (sp->is_empty()) {
3085 return;
3086 }
3088 ParallelCompactData& sd = PSParallelCompact::summary_data();
3089 ParMarkBitMap* const bitmap = mark_bitmap();
3090 HeapWord* const dp_addr = dense_prefix(space_id);
3091 HeapWord* beg_addr = sp->bottom();
3092 HeapWord* end_addr = sp->top();
3094 #ifdef ASSERT
3095 assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
3096 if (cm->should_verify_only()) {
3097 VerifyUpdateClosure verify_update(cm, sp);
3098 bitmap->iterate(&verify_update, beg_addr, end_addr);
3099 return;
3100 }
3102 if (cm->should_reset_only()) {
3103 ResetObjectsClosure reset_objects(cm);
3104 bitmap->iterate(&reset_objects, beg_addr, end_addr);
3105 return;
3106 }
3107 #endif
3109 const size_t beg_region = sd.addr_to_region_idx(beg_addr);
3110 const size_t dp_region = sd.addr_to_region_idx(dp_addr);
3111 if (beg_region < dp_region) {
3112 update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
3113 }
3115 // The destination of the first live object that starts in the region is one
3116 // past the end of the partial object entering the region (if any).
3117 HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3118 HeapWord* const new_top = _space_info[space_id].new_top();
3119 assert(new_top >= dest_addr, "bad new_top value");
3120 const size_t words = pointer_delta(new_top, dest_addr);
3122 if (words > 0) {
3123 ObjectStartArray* start_array = _space_info[space_id].start_array();
3124 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3126 ParMarkBitMap::IterationStatus status;
3127 status = bitmap->iterate(&closure, dest_addr, end_addr);
3128 assert(status == ParMarkBitMap::full, "iteration not complete");
3129 assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3130 "live objects skipped because closure is full");
3131 }
3132 }
3134 jlong PSParallelCompact::millis_since_last_gc() {
3135 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
3136 // XXX See note in genCollectedHeap::millis_since_last_gc().
3137 if (ret_val < 0) {
3138 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
3139 return 0;
3140 }
3141 return ret_val;
3142 }
3144 void PSParallelCompact::reset_millis_since_last_gc() {
3145 _time_of_last_gc = os::javaTimeMillis();
3146 }
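// Copy words from the closure's source to its destination until the
// destination region is full.  Used by fill_region() when the last object to
// be copied does not fit; the object's interior oop updates are deferred.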
3148 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3149 {
3150 if (source() != destination()) {
3151 assert(source() > destination(), "must copy to the left");
3152 Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3153 }
3154 update_state(words_remaining());
3155 assert(is_full(), "sanity");
3156 return ParMarkBitMap::full;
3157 }
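// Copy the tail of an object that begins in an earlier source region (a
// partial object).  At most words_remaining() words are copied, so the
// destination region may fill before the end of the object is reached.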
3159 void MoveAndUpdateClosure::copy_partial_obj()
3160 {
3161 size_t words = words_remaining();
3163 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3164 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3165 if (end_addr < range_end) {
3166 words = bitmap()->obj_size(source(), end_addr);
3167 }
3169 // This test is necessary; if omitted, the pointer updates to a partial object
3170 // that crosses the dense prefix boundary could be overwritten.
3171 if (source() != destination()) {
3172 assert(source() > destination(), "must copy to the left");
3173 Copy::aligned_conjoint_words(source(), destination(), words);
3174 }
3175 update_state(words);
3176 }
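// Process a single live object:  record its new location in the start array,
// copy it to its destination (if it is moving) and update its interior oops.
// Returns would_overflow, without copying, if the object does not fit in the
// words remaining in the destination region.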
3178 ParMarkBitMapClosure::IterationStatus
3179 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3180 assert(destination() != NULL, "sanity");
3181 assert(bitmap()->obj_size(addr) == words, "bad size");
3183 _source = addr;
3184 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3185 destination(), "wrong destination");
3187 if (words > words_remaining()) {
3188 return ParMarkBitMap::would_overflow;
3189 }
3191 // The start_array must be updated even if the object is not moving.
3192 if (_start_array != NULL) {
3193 _start_array->allocate_block(destination());
3194 }
3196 if (destination() != source()) {
3197 assert(destination() < source(), "must copy to the left");
3198 Copy::aligned_conjoint_words(source(), destination(), words);
3199 }
3201 oop moved_oop = (oop) destination();
3202 moved_oop->update_contents(compaction_manager());
3203 assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
3205 update_state(words);
3206 assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3207 return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3208 }
3210 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3211 ParCompactionManager* cm,
3212 PSParallelCompact::SpaceId space_id) :
3213 ParMarkBitMapClosure(mbm, cm),
3214 _space_id(space_id),
3215 _start_array(PSParallelCompact::start_array(space_id))
3216 {
3217 }
3219 // Updates the references in the object to their new values.
3220 ParMarkBitMapClosure::IterationStatus
3221 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3222 do_addr(addr);
3223 return ParMarkBitMap::incomplete;
3224 }
3226 // Verify the new location using the forwarding pointer recorded by
3227 // MarkSweep::mark_sweep_phase2().  (The mark word is restored to its
3228 // initial value by ResetObjectsClosure, below.)
3229 ParMarkBitMapClosure::IterationStatus
3230 PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3231 // The second arg (words) is not used.
3232 oop obj = (oop) addr;
3233 HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
3234 HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
3235 if (forwarding_ptr == NULL) {
3236 // The object is dead or not moving.
3237 assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
3238 "Object liveness is wrong.");
3239 return ParMarkBitMap::incomplete;
3240 }
3241 assert(UseParallelOldGCDensePrefix ||
3242 (HeapMaximumCompactionInterval > 1) ||
3243 (MarkSweepAlwaysCompactCount > 1) ||
3244 (forwarding_ptr == new_pointer),
3245 "Calculation of new location is incorrect");
3246 return ParMarkBitMap::incomplete;
3247 }
3249 // Reset objects modified for debug checking.
3250 ParMarkBitMapClosure::IterationStatus
3251 PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
3252 // The second arg (words) is not used.
3253 oop obj = (oop) addr;
3254 obj->init_mark();
3255 return ParMarkBitMap::incomplete;
3256 }
3258 // Prepare for compaction. This method is executed once
3259 // (i.e., by a single thread) before compaction.
3260 // Save the updated location of the intArrayKlassObj for
3261 // filling holes in the dense prefix.
3262 void PSParallelCompact::compact_prologue() {
3263 _updated_int_array_klass_obj = (klassOop)
3264 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3265 }