Mon, 20 May 2013 10:44:33 -0700
7186737: Unable to allocate bit maps or card tables for parallel gc for the requested heap
Summary: Print a helpful error message when the VM aborts due to inability to allocate bit maps or card tables
Reviewed-by: jmasa, stefank
Contributed-by: tamao <tao.mao@oracle.com>
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.hpp"

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;

// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. Basic
// idea would be to use a fake value for the top of the source space. It is
// doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.
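
// As a concrete illustration (a hypothetical scenario, not data from any
// particular run): suppose eden region R ends with the tail of a partial
// object P followed by live objects A and B, and the old gen fills up while
// copying P. The summary phase would then record, using the record() method
// declared below:
//
//   split_info.record(R,                  // src_region_idx: the split region
//                     size_of_tail_of_P,  // partial_obj_size; zeroed in the
//                                         // summary data for region R
//                     dest_in_old_gen);   // destination of P's tail
//
// P's tail goes to the old gen (dest_space_1), while A and B are redirected
// to a different destination space, e.g. compacted within eden itself. The
// names R, P, A and B are illustrative only.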

class SplitInfo
{
 public:
  // Return true if this split info is valid (i.e., if a split has been
  // recorded). The very first region cannot have a partial object and thus is
  // never split, so 0 is the 'invalid' value.
  bool is_valid() const { return _src_region_idx > 0; }

  // Return true if this split holds data for the specified source region.
  inline bool is_split(size_t source_region) const;

  // The index of the split region, the size of the partial object on that
  // region and the destination of the partial object.
  size_t    src_region_idx() const   { return _src_region_idx; }
  size_t    partial_obj_size() const { return _partial_obj_size; }
  HeapWord* destination() const      { return _destination; }

  // The destination count of the partial object referenced by this split
  // (either 1 or 2). This must be added to the destination count of the
  // remainder of the source region.
  unsigned int destination_count() const { return _destination_count; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of the destination region;
  // otherwise this is NULL.
  HeapWord* dest_region_addr() const { return _dest_region_addr; }

  // If a word within the partial object will be written to the first word of a
  // destination region, this is the address of that word within the partial
  // object; otherwise this is NULL.
  HeapWord* first_src_addr() const { return _first_src_addr; }

  // Record the data necessary to split the region src_region_idx.
  void record(size_t src_region_idx, size_t partial_obj_size,
              HeapWord* destination);

  void clear();

  DEBUG_ONLY(void verify_clear();)

 private:
  size_t       _src_region_idx;
  size_t       _partial_obj_size;
  HeapWord*    _destination;
  unsigned int _destination_count;
  HeapWord*    _dest_region_addr;
  HeapWord*    _first_src_addr;
};

inline bool SplitInfo::is_split(size_t region_idx) const
{
  return _src_region_idx == region_idx && is_valid();
}

class SpaceInfo
{
 public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection. Valid only after
  // the summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

 private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo         _split_info;
};

class ParallelCompactData
{
 public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2RegionSize;
  static const size_t RegionSize;
  static const size_t RegionSizeBytes;

  // Mask for the bits in a size_t to get an offset within a region.
  static const size_t RegionSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a region.
  static const size_t RegionAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a region.
  static const size_t RegionAddrMask;

  class RegionData
  {
   public:
    // Destination address of the region.
    HeapWord* destination() const { return _destination; }

    // The first region containing data destined for this region.
    size_t source_region() const { return _source_region; }

    // The object (if any) starting in this region and ending in a different
    // region that could not be updated during the main (parallel) compaction
    // phase. This is different from _partial_obj_addr, which is an object that
    // extends onto a source region. However, the two uses do not overlap in
    // time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the region.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the region (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this region due to objects that start
    // in this region (words). This does not include the partial object
    // extending onto the region (if any), or the part of an object that
    // extends onto the next region (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the region (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other regions to which data from
    // this region will be copied. At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the region will be compacted completely into itself, or the
    //     region is empty. The region can be claimed and then filled.
    // 1 - data from the region will be compacted into 1 other region; some
    //     data from the region may also be compacted into the region itself.
    // 2 - data from the region will be copied to 2 other regions.
    //
    // During compaction as regions are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, it can be claimed and
    // then filled.
    //
    // A region is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed). After a region has
    // been filled, the destination_count should be set to the completed value
    // (dc_completed). A worked sketch of this lifecycle follows below.
    inline uint destination_count() const;
    inline uint destination_count_raw() const;
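
    // A minimal sketch of the claim/fill/complete protocol described above,
    // as one GC worker might drive it (hypothetical driver code, not part of
    // this class; error handling omitted):
    //
    //   RegionData* const rd = summary_data().region(region_idx);
    //   if (rd->available() && rd->claim()) {  // count reached 0; try to claim
    //     // ... copy live data into the region (see fill_region()) ...
    //     rd->set_completed();                 // publish: region is filled
    //   }
    //
    // Workers that finish emptying a source region call
    // decrement_destination_count() on it; the worker that drops the count to
    // 0 can enqueue the region for filling.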

    // The location of the java heap data that corresponds to this region.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this region.
    inline HeapWord* highest_ref() const;

    // Whether this region is available to be claimed, has been claimed, or has
    // been completed.
    //
    // Minor subtlety: claimed() returns true if the region is marked
    // completed(), which is desirable since a region must be claimed before it
    // can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed() const   { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_region(size_t region)      { _source_region = region; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (region_sz_t) words;
    }

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

   private:
    // The type used to represent object sizes within a region.
    typedef uint region_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size. The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const region_sz_t dc_shift;     // Shift amount.
    static const region_sz_t dc_mask;      // Mask for destination count.
    static const region_sz_t dc_one;       // 1, shifted appropriately.
    static const region_sz_t dc_claimed;   // Region has been claimed.
    static const region_sz_t dc_completed; // Region has been completed.
    static const region_sz_t los_mask;     // Mask for live obj size.
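
    // For illustration (the actual values are defined in the .cpp file;
    // assume here a 32-bit field with dc_shift == 27): the high bits hold
    // the destination count and the low bits the live obj size, so
    //
    //   dc_one      == 1U << 27            // one destination region
    //   dc_mask     == ~0U << 27           // all destination count bits
    //   los_mask    == ~(~0U << 27)        // low 27 bits
    //   _dc_and_los == (count << 27) | live_obj_size_in_words
    //
    // e.g., a region with destination count 2 and 1000 live words would pack
    // to (2U << 27) | 1000.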

    HeapWord*            _destination;
    size_t               _source_region;
    HeapWord*            _partial_obj_addr;
    region_sz_t          _partial_obj_size;
    region_sz_t volatile _dc_and_los;
#ifdef ASSERT
    // These enable optimizations that are only partially implemented. Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*            _data_location;
    HeapWord*            _highest_ref;
#endif // #ifdef ASSERT

#ifdef ASSERT
   public:
    uint                 _pushed; // 0 until region is pushed onto a worker's stack
   private:
#endif
  };

 public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t region_count() const        { return _region_count; }
  size_t reserved_byte_size() const  { return _reserved_byte_size; }

  // Convert region indices to/from RegionData pointers.
  inline RegionData* region(size_t region_idx) const;
  inline size_t      region(const RegionData* const region_ptr) const;

  // Returns true if the given address is contained within the region.
  bool region_contains(size_t region_index, HeapWord* addr);

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
  // destination of region n is simply the start of region n. The argument beg
  // must be region-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
                                  HeapWord* destination, HeapWord* target_end,
                                  HeapWord** target_next);
  bool summarize(SplitInfo& split_info,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** source_next,
                 HeapWord* target_beg, HeapWord* target_end,
                 HeapWord** target_next);

  void clear();
  void clear_range(size_t beg_region, size_t end_region);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
  }

  // Return the number of words between addr and the start of the region
  // containing addr.
  inline size_t region_offset(const HeapWord* addr) const;

  // Convert addresses to/from a region index or region pointer.
  inline size_t addr_to_region_idx(const HeapWord* addr) const;
  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
  inline HeapWord* region_to_addr(size_t region) const;
  inline HeapWord* region_to_addr(size_t region, size_t offset) const;
  inline HeapWord* region_to_addr(const RegionData* region) const;

  inline HeapWord* region_align_down(HeapWord* addr) const;
  inline HeapWord* region_align_up(HeapWord* addr) const;
  inline bool is_region_aligned(HeapWord* addr) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t region_idx) const;

  // Return the new location of the object p after the compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif // #ifdef ASSERT

 private:
  bool initialize_region_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

 private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif // #ifdef ASSERT

  PSVirtualSpace* _region_vspace;
  size_t          _reserved_byte_size;
  RegionData*     _region_data;
  size_t          _region_count;
};

inline uint
ParallelCompactData::RegionData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::RegionData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}

inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  // Adding dc_mask is equivalent to subtracting dc_one: dc_mask is the two's
  // complement of dc_one, and the low (live obj size) bits are unaffected
  // because the count is at least one.
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::RegionData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}

// MT-unsafe claiming of a region. Should only be used during single threaded
// execution.
inline bool ParallelCompactData::RegionData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif // #ifdef ASSERT
}

inline bool ParallelCompactData::RegionData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}
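
// Note on claim(): the compare-and-swap succeeds only if _dc_and_los still
// equals the bare live obj size, i.e., the destination count has already
// dropped to zero and no other thread has claimed the region. For example
// (assuming the illustrative bit layout sketched earlier), a thread claiming
// a region with 1000 live words swaps 1000 for (dc_claimed | 1000); a
// competing thread then sees old != los and backs off.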

inline ParallelCompactData::RegionData*
ParallelCompactData::region(size_t region_idx) const
{
  assert(region_idx <= region_count(), "bad arg");
  return _region_data + region_idx;
}

inline size_t
ParallelCompactData::region(const RegionData* const region_ptr) const
{
  assert(region_ptr >= _region_data, "bad arg");
  assert(region_ptr <= _region_data + region_count(), "bad arg");
  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}

inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2RegionSize;
}

inline ParallelCompactData::RegionData*
ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
{
  return region(addr_to_region_idx(addr));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region) const
{
  assert(region <= _region_count, "region out of range");
  return _region_start + (region << Log2RegionSize);
}

inline HeapWord*
ParallelCompactData::region_to_addr(const RegionData* region) const
{
  return region_to_addr(pointer_delta(region, _region_data,
                                      sizeof(RegionData)));
}

inline HeapWord*
ParallelCompactData::region_to_addr(size_t region, size_t offset) const
{
  assert(region <= _region_count, "region out of range");
  assert(offset < RegionSize, "offset too big"); // This may be too strict.
  return region_to_addr(region) + offset;
}

inline HeapWord*
ParallelCompactData::region_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + RegionSize, "bad addr");
  return (HeapWord*)(size_t(addr) & RegionAddrMask);
}

inline HeapWord*
ParallelCompactData::region_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return region_align_down(addr + RegionSizeOffsetMask);
}

inline bool
ParallelCompactData::is_region_aligned(HeapWord* addr) const
{
  return region_offset(addr) == 0;
}
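
// A worked example of the address math above (illustrative values only; the
// real constants are defined in the .cpp file). Assume 8-byte HeapWords and
// a region size of 512 words (4096 bytes), i.e. Log2RegionSize == 9:
//
//   RegionAddrOffsetMask == 4096 - 1 == 0xfff
//   RegionAddrMask       == ~0xfff
//
// For addr == _region_start + 5000 (words):
//   addr_to_region_idx(addr) == 5000 >> 9 == 9
//   region_offset(addr)      == 5000 - 9 * 512 == 392 words
//   region_align_down(addr)  == _region_start + 9 * 512
//   region_align_up(addr)    == _region_start + 10 * 512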

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
// methods in subclasses should update the total as words are processed. Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0. The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object. A sketch of a typical subclass
// follows the inline definitions below.

class ParMarkBitMapClosure: public StackObj {
 public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

 public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

 protected:
  inline void decrement_words_remaining(size_t words);

 private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
  size_t                      _words_remaining; // Words left to copy.

 protected:
  HeapWord*                   _source; // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}
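
// The promised sketch of a typical subclass (hypothetical; the real consumers
// are MoveAndUpdateClosure and friends, defined later in this file):
//
//   class ExampleClosure: public ParMarkBitMapClosure {
//    public:
//     ExampleClosure(ParMarkBitMap* bm, ParCompactionManager* cm, size_t words)
//       : ParMarkBitMapClosure(bm, cm, words) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       if (words > words_remaining()) return ParMarkBitMap::would_overflow;
//       // ... process the live words in [addr, addr + words) ...
//       decrement_words_remaining(words);
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };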

// The UseParallelOldGC collector is a stop-the-world garbage collector that
// does parts of the collection using parallel threads. The collection includes
// the tenured generation and the young generation. The permanent generation is
// collected at the same time as the other two generations but the permanent
// generation is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the processing of a
// klass AAA, any objects referenced by AAA must already have been processed.
// This requirement is enforced by a left (lower address) to right (higher
// address) sliding compaction.
//
// There are four phases of the collection.
//
// - marking phase
// - summary phase
// - compacting phase
// - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
// - mark all the live objects
// - calculate the destination of each object at the end of the collection
// - move the objects to their destination
// - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy(). The
// marking phase is implemented in PSParallelCompact::marking_phase() and does a
// complete marking of the heap. The summary phase is implemented in
// PSParallelCompact::summary_phase(). The move and update phase is implemented
// in PSParallelCompact::compact(). A sketch of the overall flow appears after
// this comment block.
//
// A space that is being collected is divided into regions and with each region
// is associated an object of type ParallelCompactData::RegionData. Each region
// is of a fixed size and typically will contain more than 1 object and may have
// parts of objects at the front and back of the region.
//
// region          -----+---------------------+----------
// objects covered [ AAA )[ BBB )[ CCC )[ DDD )
//
// The marking phase does a complete marking of all live objects in the heap.
// The marking also computes the size of the data for all live objects covered
// by the region. This size includes the part of any live object spanning onto
// the region (part of AAA if it is live) from the front, all live objects
// contained in the region (BBB and/or CCC if they are live), and the part of
// any live objects covered by the region that extends off the region (part of
// DDD if it is live). The marking phase uses multiple GC threads and marking
// is done in a bit array of type ParMarkBitMap. The marking of the bit map is
// done atomically as is the accumulation of the size of the live objects
// covered by a region.
//
// The summary phase calculates the total live data to the left of each region
// XXX. Based on that total and the bottom of the space, it can calculate the
// starting location of the live data in XXX. The summary phase calculates for
// each region XXX quantities such as
//
// - the amount of live data at the beginning of a region from an object
//   entering the region.
// - the location of the first live data on the region
// - a count of the number of regions receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix is a
// portion at the beginning of the space that is not moved. The objects in the
// dense prefix do need to have their object references updated. See method
// summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates all
// references in the object.
//
// A current exception is that objects that cross a region boundary are moved
// but do not have their references updated. References are not updated because
// it cannot easily be determined if the klass pointer KKK for the object AAA
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them.
// A region is ready to be filled if it is empty of live objects. Such a region
// may have been initially empty (only contained dead objects) or may have had
// all its live objects copied out already. A region that compacts into itself
// is also ready for filling. The ready list is initially filled with empty
// regions and regions compacting into themselves. There is always at least 1
// region that can be put on the ready list. The regions are atomically added
// and removed from the ready list.
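
// A condensed sketch of that flow (hedged; see invoke_no_policy() in the .cpp
// file for the authoritative sequence, which also handles policy, timing and
// verification):
//
//   pre_compact(&pre_gc_values);
//   marking_phase(cm, maximum_heap_compaction);  // parallel marking
//   summary_phase(cm, maximum_heap_compaction);  // single-threaded
//   adjust_roots();                              // fix up root pointers
//   compact();                                   // parallel move-and-update
//   post_compact();                              // clean up phase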

class PSParallelCompact : AllStatic {
 public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::RegionData RegionData;

  typedef enum {
    old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

 public:
  // Inline closure decls
  //
  class IsAliveClosure: public BoolObjectClosure {
   public:
    virtual bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;
   protected:
    template <class T> inline void do_oop_work(T* p);
   public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };

  class AdjustPointerClosure: public OopClosure {
   public:
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // Do not walk from thread stacks to the code cache in this phase.
    virtual void do_code_blob(CodeBlob* cb) const { }
  };

  class AdjustKlassClosure : public KlassClosure {
   public:
    void do_klass(Klass* klass);
  };

  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

 private:
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc; // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static Klass* _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif // #ifdef ASSERT

 private:

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction);

  template <class T>
  static inline void follow_root(ParCompactionManager* cm, T* p);

  // Compute the dense prefix for the designated space. This is an experimental
  // implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density. The mean and
  // standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) region in the range [beg, end) that has at
  // least dead_words of dead space to the left. The argument beg must be the
  // first region in the space that is not completely live.
  static RegionData* dead_wood_limit_region(const RegionData* beg,
                                            const RegionData* end,
                                            size_t dead_words);

  // Return a pointer to the first region in the range [beg, end) that is not
  // completely full.
  static RegionData* first_dead_space_region(const RegionData* beg,
                                             const RegionData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate region. Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const RegionData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Region; bit must be
  // the bit index corresponding to the first word of the Region.
  static inline bool dead_space_crosses_boundary(const RegionData* region,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary. Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s). Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects to a survivor space since it's rare that both survivors
  // are non-empty.
  static void provoke_split_fill_survivor(SpaceId id);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // Adjust addresses in roots. Does not adjust addresses in heap.
  static void adjust_roots();

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available regions to the stack and draining tasks to the task queue.
  static void enqueue_region_draining_tasks(GCTaskQueue* q,
                                            uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen. Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

 public:
  class MarkAndPushClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  class FollowKlassClosure : public KlassClosure {
   private:
    MarkAndPushClosure* _mark_and_push_closure;
   public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };

  PSParallelCompact();

  // Convenient accessor for Universe::heap().
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations. This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors
  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&_adjust_pointer_closure; }
  static KlassClosure* adjust_klass_closure()  { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
  template <class T> static inline void adjust_pointer(T* p);

  static void follow_klass(ParCompactionManager* cm, Klass* klass);
  static void adjust_klass(ParCompactionManager* cm, Klass* klass);

  static void follow_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);
  static void adjust_class_loader(ParCompactionManager* cm,
                                  ClassLoaderData* klass);

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given region range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
                                            size_t region_start_index,
                                            size_t region_end_index,
                                            idx_t exiting_object_offset,
                                            idx_t region_offset_start,
                                            idx_t region_offset_end);

  // Update a region in the dense prefix. For each live object in the region,
  // update its interior references. For each dead object, fill it with
  // deadwood. Dead space at the end of a region range will be filled to the
  // start of the next live object regardless of the region_index_end. None of
  // the objects in the dense prefix move, and dead space is dead (holds only
  // dead objects that don't need any processing), so dead space can be filled
  // in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t region_index_start,
                                                  size_t region_index_end);

  // Return the address of the count + 1st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);

  // Determine the next source region, set closure.source() to the start of
  // the new region and return the region index. Parameter end_addr is the
  // address one beyond the end of the source range just processed. If
  // necessary, switch to a new source space and set src_space_id (in-out
  // parameter) and src_space_top (out parameter) accordingly.
  static size_t next_src_region(MoveAndUpdateClosure& closure,
                                SpaceId& src_space_id,
                                HeapWord*& src_space_top,
                                HeapWord* end_addr);

  // Decrement the destination count for each non-empty source region in the
  // range [beg_region, region(region_align_up(end_addr))). If the destination
  // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);

  // Fill a region, copying objects from one or more source regions.
  static void fill_region(ParCompactionManager* cm, size_t region_idx);
  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
    fill_region(cm, region);
  }

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  static ParMarkBitMap* mark_bitmap()        { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

  static void print_on_error(outputStream* st);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_region_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif // #ifndef PRODUCT

#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif // #ifdef ASSERT
};

inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}

template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");

  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
  cm->follow_marking_stacks();
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
      cm->push(obj);
    }
  }
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL,  // is forwarding ptr?
           "should be forwarded");
    // Just always do the update unconditionally?
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
}
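
// For orientation (a hedged sketch, not a verbatim copy of the .cpp file):
// the adjust phase applies adjust_pointer() to every reference field via
// AdjustPointerClosure, roughly
//
//   void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) {
//     adjust_pointer(p);  // rewrite *p to its post-compaction address
//   }
//
// so each field is rewritten to the summary data's calc_new_pointer() result.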

template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(_compaction_manager, p);
}

inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density) {
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}

inline bool
PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/region");
  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the region, (2) an object does not start at the beginning of the
  // region, and (3) an object does not end at the end of the prior region.
  return region->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}

#ifdef ASSERT
inline void
PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
         "checking alignment");
}
#endif // ASSERT

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the current
  // destination, update the interior oops and the start array and return either
  // full (if the closure is full) or incomplete. If the object will not fit,
  // return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source(). Interior
  // oops and the start array are not updated. Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source(). Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

 protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

 protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination; // Next addr to be written.
};

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}

class UpdateOnlyClosure: public ParMarkBitMapClosure {
 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

 public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
{
  _start_array->allocate_block(addr);
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure
{
 public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    CollectedHeap::fill_with_objects(addr, size);
    HeapWord* const end = addr + size;
    do {
      _start_array->allocate_block(addr);
      addr += oop(addr)->size();
    } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

 private:
  ObjectStartArray* const _start_array;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP