Thu, 30 May 2013 13:04:51 -0700
6725714: par compact - add a table to speed up bitmap searches
Reviewed-by: jmasa, tschatzl
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. The
// basic idea would be to use a fake value for the top of the source space;
// this is doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.

class SplitInfo
{
public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded). The very first region cannot have a partial object and thus is
  // never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2). This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}
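
// A hedged usage sketch (illustrative only; the locals 'si', 'src_space_id'
// and 'split_region_idx' are hypothetical, not code from this collector):
// during the summary phase a split is recorded, and the compaction phase
// later queries it when filling destination regions.
//
//   SplitInfo& si = _space_info[src_space_id].split_info();
//   si.record(split_region_idx, partial_obj_size, split_destination);
//   ...
//   if (si.is_split(src_region_idx)) {
//     // The partial object is copied to si.destination(); the live data to
//     // its right is summarized to a different destination space.
//   }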

class SpaceInfo
{
public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection. Valid only after
  // the summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};

class ParallelCompactData
{
public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  static const size_t Log2BlockSize;
  static const size_t BlockSize;
  static const size_t BlockSizeBytes;

  static const size_t BlockSizeOffsetMask;
  static const size_t BlockAddrOffsetMask;
  static const size_t BlockAddrMask;

  static const size_t BlocksPerRegion;
  static const size_t Log2BlocksPerRegion;

  class RegionData
  {
  public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase. This is different from _partial_obj_addr, which is an object that
    // extends onto a source region. However, the two uses do not overlap in
    // time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that start
    // in this region (words). This does not include the partial object
    // extending onto the region (if any), or the part of an object that
    // extends onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied. At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or the
    //     region is empty. The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed). After a region has
    // been filled, the destination_count should be set to the completed value
    // (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // Whether the block table for this region has been filled.
    inline bool blocks_filled() const;

    // Number of times the block table was filled.
    DEBUG_ONLY(inline size_t blocks_filled_count() const;)

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or has
    // been completed.
    //
    // Minor subtlety: claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before it
    // can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed() const   { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }
    inline void set_blocks_filled();

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size. The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
    bool                 _blocks_filled;

#ifdef ASSERT
    size_t               _blocks_filled_count; // Number of block table fills.

    // These enable optimizations that are only partially implemented. Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif  // #ifdef ASSERT

#ifdef ASSERT
   public:
    uint                 _pushed; // 0 until region is pushed onto a stack
   private:
#endif
  };
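
  // A hedged illustration of the destination_count life cycle described in
  // the comments above (the calls are real members of RegionData; the
  // sequencing shown is a sketch, not code from the collector):
  //
  //   // Summary phase: 0, 1 or 2 destination regions.
  //   region_ptr->set_destination_count(count);
  //   // Compaction: each time data destined elsewhere is copied out.
  //   region_ptr->decrement_destination_count();
  //   // When the count reaches 0 the region becomes available:
  //   if (region_ptr->claim()) {
  //     // ... fill the region ...
  //     region_ptr->set_completed();
  //   }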
367 // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
368 // holds an offset, which is the amount of live data in the Region to the left
369 // of the first live object that starts in the Block.
370 class BlockData
371 {
372 public:
373 typedef unsigned short int blk_ofs_t;
375 blk_ofs_t offset() const { return _offset; }
376 void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
378 private:
379 blk_ofs_t _offset;
380 };
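
  // A hedged sketch of how the block table might be filled for one region
  // ('region_idx' and 'live_to_left' are hypothetical names; see
  // PSParallelCompact::fill_blocks() for the real implementation): walk the
  // live data in the region, accumulating a running count of live words, and
  // record the running count at each block boundary.
  //
  //   size_t first_block = region_to_block_idx(region_idx);
  //   size_t live_to_left = 0; // live words to the left of the current block
  //   for (size_t b = 0; b < BlocksPerRegion; ++b) {
  //     block(first_block + b)->set_offset(live_to_left);
  //     live_to_left += /* live words that start in block b */;
  //   }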

public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const { return _region_count; }
  size_t reserved_byte_size() const { return _reserved_byte_size; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  size_t block_count() const { return _block_count; }
  inline BlockData* block(size_t block_idx) const;
  inline size_t     block(const BlockData* block_ptr) const;

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n. The argument beg
  // must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord* region_to_addr(size_t region) const;
  inline HeapWord* region_to_addr(size_t region, size_t offset) const;
  inline HeapWord* region_to_addr(const RegionData* region) const;

  inline HeapWord* region_align_down(HeapWord* addr) const;
  inline HeapWord* region_align_up(HeapWord* addr) const;
  inline bool is_region_aligned(HeapWord* addr) const;

  // Analogous to region_offset() for blocks.
  size_t block_offset(const HeapWord* addr) const;
  size_t addr_to_block_idx(const HeapWord* addr) const;
  size_t addr_to_block_idx(const oop obj) const {
    return addr_to_block_idx((HeapWord*) obj);
  }
  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
  inline HeapWord* block_to_addr(size_t block) const;
  inline size_t region_to_block_idx(size_t region) const;

  inline HeapWord* block_align_down(HeapWord* addr) const;
  inline HeapWord* block_align_up(HeapWord* addr) const;
  inline bool is_block_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the location of the object after compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }
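
  // A hedged sketch of how the region and block tables combine to answer
  // calc_new_pointer() (the real implementation is in psParallelCompact.cpp;
  // handling of the partial object extending onto the region is elided here).
  // Without the block table, the live words preceding addr would have to be
  // counted in the mark bitmap all the way from the region start; with it,
  // the bitmap search is limited to a single block.
  //
  //   RegionData* const rd = addr_to_region_ptr(addr);
  //   size_t live_to_left = addr_to_block_ptr(addr)->offset()
  //       + /* marked words in the bitmap from block_align_down(addr)
  //            to addr */;
  //   HeapWord* new_addr = rd->destination() + live_to_left;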

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif  // #ifdef ASSERT

private:
  bool initialize_block_data();
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif  // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  size_t          _reserved_byte_size;
  RegionData*     _region_data;
  size_t          _region_count;

  PSVirtualSpace* _block_vspace;
  BlockData*      _block_data;
  size_t          _block_count;
};

inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline bool
ParallelCompactData::RegionData::blocks_filled() const
{
  return _blocks_filled;
}

#ifdef ASSERT
inline size_t
ParallelCompactData::RegionData::blocks_filled_count() const
{
  return _blocks_filled_count;
}
#endif // #ifdef ASSERT

inline void
ParallelCompactData::RegionData::set_blocks_filled()
{
  _blocks_filled = true;
  // Debug builds count the number of times the table was filled.
  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  // Adding dc_mask is the same as adding -dc_one in two's complement, so this
  // atomically decrements the destination count without disturbing the live
  // obj size bits.
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region. Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  // The CAS succeeds only if the destination count is 0, i.e., _dc_and_los
  // holds just the live obj size; this atomically moves the region from
  // 'available' to 'claimed'.
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}
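
// A hedged sketch of how a GC worker might use the primitives above to claim
// and process a region (the surrounding loop and the names 'sd', 'cm' and
// 'region_idx' are illustrative only):
//
//   ParallelCompactData::RegionData* const rd = sd.region(region_idx);
//   if (rd->claim()) { // CAS from available (count == 0) to claimed
//     PSParallelCompact::fill_region(cm, region_idx);
//     rd->set_completed(); // region has been filled
//   }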

inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big"); // This may be too strict.
  return region_to_addr(region) + offset;
}

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}

inline size_t
ParallelCompactData::block_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2BlockSize;
}

inline ParallelCompactData::BlockData*
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
{
  return block(addr_to_block_idx(addr));
}

inline HeapWord*
ParallelCompactData::block_to_addr(size_t block) const
{
  assert(block < _block_count, "block out of range");
  return _region_start + (block << Log2BlockSize);
}

inline size_t
ParallelCompactData::region_to_block_idx(size_t region) const
{
  return region << Log2BlocksPerRegion;
}

inline HeapWord*
ParallelCompactData::block_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & BlockAddrMask);
}

inline HeapWord*
ParallelCompactData::block_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return block_align_down(addr + BlockSizeOffsetMask);
}

inline bool
ParallelCompactData::is_block_aligned(HeapWord* addr) const
{
  return block_offset(addr) == 0;
}

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
// methods in subclasses should update the total as words are processed. Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0. The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object.

class ParMarkBitMapClosure: public StackObj {
public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

protected:
  inline void decrement_words_remaining(size_t words);

private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining; // Words left to copy.

protected:
  HeapWord*                   _source;          // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}
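
// A minimal hedged sketch of a subclass using the words_remaining() mechanism
// described above (the class name and body are illustrative, not code from
// this collector):
//
//   class CountWordsClosure: public ParMarkBitMapClosure {
//   public:
//     CountWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm,
//                       size_t words)
//       : ParMarkBitMapClosure(bm, cm, words) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       if (words > words_remaining()) {
//         return ParMarkBitMap::would_overflow;
//       }
//       decrement_words_remaining(words); // closure becomes 'full' at 0
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };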

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation. The permanent generation is
// collected at the same time as the other two generations but the permanent
// generation is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the processing of a
// klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
//      - marking phase
//      - summary phase
//      - compacting phase
//      - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//      - mark all the live objects
//      - calculate the destination of each object at the end of the collection
//      - move the objects to their destination
//      - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData::RegionData. Each region
// is of a fixed size and typically will contain more than 1 object and may have
// parts of objects at the front and back of the region.
//
// region            -----+---------------------+----------
// objects covered   [ AAA     )[ BBB )[ CCC   )[ DDD     )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also compiles the size of the data for all live objects covered
// by the region. This size includes the part of any live object spanning onto
// the region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live). The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap. The marking of the bit map is
// done atomically as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantities such as
//
//      - the amount of live data at the beginning of a region from an object
//        entering the region.
//      - the location of the first live data on the region
//      - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix is a
// portion at the beginning of the space that is not moved. The objects in the
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them.
// A region is ready to be filled if it is empty of live objects. Such a region
// may have been initially empty (only contained dead objects) or may have had
// all its live objects copied out already. A region that compacts into itself
// is also ready for filling. The ready list is initially filled with empty
// regions and regions compacting into themselves. There is always at least 1
// region that can be put on the ready list. The regions are atomically added
// and removed from the ready list.
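
// A hedged sketch of the control flow described above, using method names
// declared below (the ordering shown is illustrative; the real driver is
// PSParallelCompact::invoke_no_policy() in psParallelCompact.cpp):
//
//   pre_compact(&pre_gc_values);
//   marking_phase(cm, maximum_heap_compaction); // mark live objects
//   summary_phase(cm, maximum_heap_compaction); // compute destinations
//   compact();                                  // move objects, update refs
//   post_compact();                             // clean up, deferred updates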

class PSParallelCompact : AllStatic {
public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;
  typedef ParallelCompactData::BlockData BlockData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  protected:
    template <class T> inline void do_oop_work(T* p);
  public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FollowStackClosure: public VoidClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };

  class AdjustPointerClosure: public OopClosure {
  public:
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // Do not walk from thread stacks to the code cache in this phase.
    virtual void do_code_blob(CodeBlob* cb) const { }
  };

  class AdjustKlassClosure : public KlassClosure {
  public:
    void do_klass(Klass* klass);
  };

  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

private:
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc; // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static Klass* _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif  // #ifdef ASSERT

private:

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction);

  template <class T>
  static inline void follow_root(ParCompactionManager* cm, T* p);

  // Compute the dense prefix for the designated space. This is an experimental
  // implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density. The mean and
  // standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) region in the range [beg, end) that has at least
  // dead_words of dead space to the left. The argument beg must be the first
  // region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region. Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary. Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s). Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects to a survivor space since it's rare that both survivors
  // are non-empty.
  static void provoke_split_fill_survivor(SpaceId id);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots. Does not adjust addresses in heap.
  static void adjust_roots();

  DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen. Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

public:
  class MarkAndPushClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  class FollowKlassClosure : public KlassClosure {
  private:
    MarkAndPushClosure* _mark_and_push_closure;
  public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };

  PSParallelCompact();

  // Convenient accessor for Universe::heap().
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations. This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors
  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&_adjust_pointer_closure; }
  static KlassClosure* adjust_klass_closure()  { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
  template <class T> static inline void adjust_pointer(T* p);

  static void follow_klass(ParCompactionManager* cm, Klass* klass);
  static void adjust_klass(ParCompactionManager* cm, Klass* klass);

  static void follow_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);
  static void adjust_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix. For each live object
  // in the region, update its interior references. For each
  // dead object, fill it with deadwood. Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end. None of the
  // objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so
  // dead space can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of the
  // new region and return the region index. Parameter end_addr is the address
  // one beyond the end of the source range just processed. If necessary,
  // switch to a new source space and set src_space_id (in-out parameter) and
  // src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))). If the destination
  // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }

  // Fill in the block table for the specified region.
  static void fill_blocks(size_t region_idx);

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

  static void print_on_error(outputStream* st);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};

inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");

  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
  cm->follow_marking_stacks();
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
      cm->push(obj);
    }
  }
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL, // is forwarding ptr?
           "should be forwarded");
    // Just always do the update unconditionally?
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
}

template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(_compaction_manager, p);
}

inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
    !_mark_bitmap.is_obj_beg(bit) &&
    !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) &&
         is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return either
  // full (if the closure is full) or incomplete. If the object will not fit,
  // return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination; // Next addr to be written.
};

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}
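
// A hedged sketch of the do_addr() contract documented above (illustrative
// only; the real implementation lives in psParallelCompact.cpp):
//
//   ParMarkBitMapClosure::IterationStatus
//   MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
//     if (words > words_remaining()) {
//       return ParMarkBitMap::would_overflow; // object does not fit
//     }
//     // ... copy the object to destination(), update interior oops and the
//     // start array ...
//     update_state(words); // advance source and destination
//     return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//   }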

class UpdateOnlyClosure: public ParMarkBitMapClosure {
private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
{
  _start_array->allocate_block(addr);
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure
{
public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    CollectedHeap::fill_with_objects(addr, size);
    HeapWord* const end = addr + size;
    do {
      _start_array->allocate_block(addr);
      addr += oop(addr)->size();
    } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

private:
  ObjectStartArray* const _start_array;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP