Wed, 24 Apr 2013 20:13:37 +0200
8013132: Add a flag to turn off the output of the verbose verification code
Reviewed-by: johnc, brutisso
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*.  Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen.  Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen.  Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object.  This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path.  The partial object and everything to its left will be
// copied to another space (call it dest_space_1).  The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed:  data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero.  The zeroing
// is possible (and necessary) since the partial object will move to a
// different destination space than anything to its right, thus the partial
// object should not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely:  when
// the partial object on the split region will be copied across a destination
// region boundary.  This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow.  Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space.  Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region.  At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region.  If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point.  Basic
// idea would be to use a fake value for the top of the source space.  It is
// doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.

class SplitInfo
{
public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded).  The very first region cannot have a partial object and thus is
  // never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2).  This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}

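// Illustrative sketch (not compiled): how the summary phase might record and
// later query a split.  The region index and size below are made-up values
// for illustration only; the real calls happen in
// ParallelCompactData::summarize_split_space() and
// PSParallelCompact::first_src_addr().
#if 0
void split_info_example(SplitInfo& si, HeapWord* dest) {
  // Suppose region 17 carries a 12-word partial object headed for dest.
  si.record(17, 12, dest);
  assert(si.is_valid(), "a recorded split is valid");
  assert(si.is_split(17) && !si.is_split(18), "split data is per-region");
  si.clear();   // Invalidated once the space has been compacted.
}
#endif
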
class SpaceInfo
{
public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection.  Valid only after
  // the summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};

class ParallelCompactData
{
public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  class RegionData
  {
  public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase.  This is different from _partial_obj_addr, which is an object
    // that extends onto a source region.  However, the two uses do not
    // overlap in time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that
    // start in this region (words).  This does not include the partial
    // object extending onto the region (if any), or the part of an object
    // that extends onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied.  At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or
    //     the region is empty.  The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed).  After a region
    // has been filled, the destination_count should be set to the completed
    // value (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or
    // has been completed.
    //
    // Minor subtlety:  claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before
    // it can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed() const   { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

  private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size.  The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
#ifdef ASSERT
    // These enable optimizations that are only partially implemented.  Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif  // #ifdef ASSERT

#ifdef ASSERT
  public:
    uint                 _pushed; // 0 until region is pushed onto a worker's stack
  private:
#endif
  };

public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const { return _region_count; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  // Returns true if the given address is contained within the region.
  bool region_contains(size_t region_index, HeapWord* addr);

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n.  The argument
  // beg must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord* region_to_addr(size_t region) const;
  inline HeapWord* region_to_addr(size_t region, size_t offset) const;
  inline HeapWord* region_to_addr(const RegionData* region) const;

  inline HeapWord* region_align_down(HeapWord* addr) const;
  inline HeapWord* region_align_up(HeapWord* addr) const;
  inline bool is_region_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the new location of the object p after the compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif  // #ifdef ASSERT

private:
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif  // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  RegionData*     _region_data;
  size_t          _region_count;
};

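// Illustrative sketch (not compiled): the core idea behind calc_new_pointer().
// This is a simplified reconstruction, not the actual implementation, which
// lives in psParallelCompact.cpp and also handles the dense prefix (where
// objects do not move) and split regions.  It assumes
// ParMarkBitMap::live_words_in_range() for the bitmap query.
#if 0
HeapWord* calc_new_pointer_sketch(ParallelCompactData& summary,
                                  ParMarkBitMap* bitmap, HeapWord* addr) {
  const size_t region_idx = summary.addr_to_region_idx(addr);
  const ParallelCompactData::RegionData* const rd = summary.region(region_idx);

  // Live data in the region lands at rd->destination(); the new address of
  // addr is offset by the live words that precede it in the same region: the
  // partial object extending onto the region plus the live words the bitmap
  // records between the region start and addr.
  const size_t live_before = rd->partial_obj_size() +
      bitmap->live_words_in_range(summary.region_to_addr(region_idx), oop(addr));
  return rd->destination() + live_before;
}
#endif
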
inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region.  Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}

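// Standalone model (illustration only, not compiled): how _dc_and_los packs
// the destination count above the live-object size and how the atomic
// operations above behave.  std::atomic stands in for HotSpot's Atomic class
// and the shift/field values are assumed for the example; the real constants
// are defined in psParallelCompact.cpp.
#if 0
#include <atomic>
#include <cstdint>

typedef uint32_t region_sz_t;
const region_sz_t dc_shift    = 27;                          // assumed shift
const region_sz_t dc_mask     = ~region_sz_t(0) << dc_shift; // count bits
const region_sz_t dc_one      = region_sz_t(1) << dc_shift;
const region_sz_t dc_claimed  = region_sz_t(8) << dc_shift;  // assumed value
const region_sz_t los_mask    = ~dc_mask;                    // size bits

std::atomic<region_sz_t> dc_and_los(0);

// Adding dc_mask is adding -dc_one modulo 2^32: the count field decrements
// while the live-object size bits are left untouched (the trick behind
// decrement_destination_count()).
void decrement_destination_count_model() { dc_and_los.fetch_add(dc_mask); }

// A region whose count has reached 0 ("available") is claimed by CAS'ing
// dc_claimed into the count field; only one worker's CAS can succeed.
bool claim_model() {
  region_sz_t los = dc_and_los.load() & los_mask;
  region_sz_t expected = los;                 // available: count bits are 0
  return dc_and_los.compare_exchange_strong(expected, dc_claimed | los);
}
#endif
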
inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big");  // This may be too strict.
  return region_to_addr(region) + offset;
}

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}

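// Standalone illustration (not compiled) of the power-of-two region
// arithmetic used above.  The 512-word region size and 8-byte HeapWord are
// assumptions made for this example; the real constants are defined in
// psParallelCompact.cpp.
#if 0
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t Log2RegionSize  = 9;   // assumed: 512-word regions
  const size_t LogHeapWordSize = 3;   // assumed: 8-byte HeapWords
  const size_t RegionSize      = size_t(1) << Log2RegionSize;
  const size_t RegionSizeBytes = RegionSize << LogHeapWordSize;
  const size_t RegionAddrOffsetMask = RegionSizeBytes - 1;
  const size_t RegionAddrMask       = ~RegionAddrOffsetMask;

  const uintptr_t region_start = 0x10000000;    // hypothetical, region-aligned
  const uintptr_t addr = region_start + 0x1234; // arbitrary heap address

  // addr_to_region_idx: word delta from the heap base, shifted by region size.
  size_t idx = ((addr - region_start) >> LogHeapWordSize) >> Log2RegionSize;
  // region_offset: word offset of addr within its region.
  size_t off = (addr & RegionAddrOffsetMask) >> LogHeapWordSize;
  // region_align_down: clear the offset bits.
  uintptr_t down = addr & RegionAddrMask;

  assert(idx == 0x1234 / RegionSizeBytes);         // region 1
  assert(off == (0x1234 % RegionSizeBytes) >> LogHeapWordSize);
  assert(down == region_start + RegionSizeBytes);  // start of region 1
  return 0;
}
#endif
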
// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0.  The do_addr()
// methods in subclasses should update the total as words are processed.  Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0.  The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object.

class ParMarkBitMapClosure: public StackObj {
public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

protected:
  inline void decrement_words_remaining(size_t words);

private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining; // Words left to copy.

protected:
  HeapWord*                   _source;          // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}

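// Illustrative sketch (not compiled): the minimal shape of a
// ParMarkBitMapClosure subclass.  This hypothetical closure just consumes a
// word budget and reports 'full' when the budget is exhausted, the same
// mechanism MoveAndUpdateClosure uses to terminate iteration.
#if 0
class CountLiveWordsClosure: public ParMarkBitMapClosure {
public:
  CountLiveWordsClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                        size_t budget)
    : ParMarkBitMapClosure(mbm, cm, budget) { }

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
    if (words > words_remaining()) {
      return ParMarkBitMap::would_overflow;  // object exceeds the budget
    }
    decrement_words_remaining(words);        // consume part of the budget
    return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
  }
};
#endif
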
// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads.  The collection includes
// the tenured generation and the young generation.  The permanent generation is
// collected at the same time as the other two generations but the permanent
// generation is collected by a single GC thread.  The permanent generation is
// collected serially because of the requirement that during the processing of a
// klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
//      - marking phase
//      - summary phase
//      - compacting phase
//      - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
//      - mark all the live objects
//      - calculate the destination of each object at the end of the collection
//      - move the objects to their destination
//      - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy().  The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap.  The summary phase is implemented in
// PSParallelCompact::summary_phase().  The move and update phase is implemented
// in PSParallelCompact::compact().
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData.  Each region is of a
// fixed size and typically will contain more than 1 object and may have parts
// of objects at the front and back of the region.
//
// region            -----+---------------------+----------
// objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also compiles the size of the data for all live objects covered
// by the region.  This size includes the part of any live object spanning onto
// the region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live).  The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap.  The marking of the bit map is
// done atomically as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX.  Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX.  The summary phase calculates for
// each region XXX quantities such as
//
//      - the amount of live data at the beginning of a region from an object
//        entering the region.
//      - the location of the first live data on the region
//      - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details.  The summary phase also
// calculates the dense prefix for the compaction.  The dense prefix is a
// portion at the beginning of the space that is not moved.  The objects in the
// dense prefix do need to have their object references updated.  See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated.  References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated.  KKK likely resides in a region to the left of the region
// containing AAA.  These AAA's have their references updated at the end in a
// clean up phase.  See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis.  A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them.
// A region is ready to be filled if it is empty of live objects.  Such a region
// may have been initially empty (only contained dead objects) or may have had
// all its live objects copied out already.  A region that compacts into itself
// is also ready for filling.  The ready list is initially filled with empty
// regions and regions compacting into themselves.  There is always at least 1
// region that can be put on the ready list.  The regions are atomically added
// and removed from the ready list.

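// Illustrative sketch (not compiled): the overall control flow the comment
// above describes.  This is a simplified stand-in; see
// PSParallelCompact::invoke_no_policy() in psParallelCompact.cpp for the real
// sequence, which also includes policy checks, timers, verification and
// adaptive resizing (and the phase methods below are private).
#if 0
void full_collection_sketch(ParCompactionManager* cm, bool maximum_compaction) {
  PSParallelCompact::marking_phase(cm, maximum_compaction); // mark live objects
  PSParallelCompact::summary_phase(cm, maximum_compaction); // compute destinations
  PSParallelCompact::compact();                             // move and update
  // "clean up" phase: deferred-object updates, counters, variable resets.
}
#endif
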
class PSParallelCompact : AllStatic {
public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
  public:
    virtual void do_object(oop p);
    virtual bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  protected:
    template <class T> inline void do_oop_work(T* p);
  public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FollowStackClosure: public VoidClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };

  class AdjustPointerClosure: public OopClosure {
  public:
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // do not walk from thread stacks to the code cache on this phase
    virtual void do_code_blob(CodeBlob* cb) const { }
  };

  class AdjustKlassClosure : public KlassClosure {
  public:
    void do_klass(Klass* klass);
  };

  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

private:
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc;   // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static Klass* _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif  // #ifdef ASSERT

private:

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction);

  template <class T>
  static inline void follow_root(ParCompactionManager* cm, T* p);

  // Compute the dense prefix for the designated space.  This is an
  // experimental implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density.  The mean and
  // standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) region in the range [beg, end) that has at
  // least dead_words of dead space to the left.  The argument beg must be the
  // first region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region.  Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary.  Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s).  Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects to a survivor space since it's rare that both survivors
  // are non-empty.
  static void provoke_split_fill_survivor(SpaceId id);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots.  Does not adjust addresses in heap.
  static void adjust_roots();

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen.  Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

public:
  class MarkAndPushClosure: public OopClosure {
  private:
    ParCompactionManager* _compaction_manager;
  public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  class FollowKlassClosure : public KlassClosure {
  private:
    MarkAndPushClosure* _mark_and_push_closure;
  public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };

  PSParallelCompact();

  // Convenient accessor for Universe::heap().
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations.  This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors
  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&_adjust_pointer_closure; }
  static KlassClosure* adjust_klass_closure()  { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
  template <class T> static inline void adjust_pointer(T* p);

  static void follow_klass(ParCompactionManager* cm, Klass* klass);
  static void adjust_klass(ParCompactionManager* cm, Klass* klass);

  static void follow_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);
  static void adjust_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix.  For each live object
  // in the region, update its interior references.  For each
  // dead object, fill it with deadwood.  Dead space at the end
  // of a region range will be filled to the start of the next
  // live object regardless of the region_index_end.  None of the
  // objects in the dense prefix move and dead space is dead
  // (holds only dead objects that don't need any processing), so
  // dead space can be filled in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the (count + 1)st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of
  // the new region and return the region index.  Parameter end_addr is the
  // address one beyond the end of the source range just processed.  If
  // necessary, switch to a new source space and set src_space_id (in-out
  // parameter) and src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))).  If the destination
  // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

  static void print_on_error(outputStream* st);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};

inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");

  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
  cm->follow_marking_stacks();
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
      cm->push(obj);
    }
  }
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL,               // is forwarding ptr?
           "should be forwarded");
    // Just always do the update unconditionally?
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
}

template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(_compaction_manager, p);
}

inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
    !_mark_bitmap.is_obj_beg(bit) &&
    !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return
  // either full (if the closure is full) or incomplete.  If the object will
  // not fit, return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source().  Interior
  // oops and the start array are not updated.  Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source().  Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination; // Next addr to be written.
};

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
{
  _start_array->allocate_block(addr);
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure
{
public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    CollectedHeap::fill_with_objects(addr, size);
    HeapWord* const end = addr + size;
    do {
      _start_array->allocate_block(addr);
      addr += oop(addr)->size();
    } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

private:
  ObjectStartArray* const _start_array;
};

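// Illustrative sketch (not compiled): how dead space handed to FillClosure
// becomes parseable heap.  The address and size are made-up stand-ins; in
// practice the closure is applied by the compaction code when a gap in the
// old gen contains no live data to copy.
#if 0
void fill_example(ParCompactionManager* cm, HeapWord* gap_start, size_t gap_words) {
  FillClosure fill(cm, PSParallelCompact::old_space_id);
  // Overwrites the gap with filler objects and records each one in the old
  // gen's start array so heap walkers can still parse the space.
  fill.do_addr(gap_start, gap_words);
}
#endif
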
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP