src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       2783:eda9eb483d29
child        3298:7913e93dca52
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

duke@435 1 /*
stefank@2534 2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
stefank@2314 30 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
stefank@2314 31 #include "gc_implementation/shared/collectorCounters.hpp"
stefank@2314 32 #include "gc_implementation/shared/markSweep.hpp"
stefank@2314 33 #include "gc_implementation/shared/mutableSpace.hpp"
stefank@2314 34 #include "memory/sharedHeap.hpp"
stefank@2314 35 #include "oops/oop.hpp"
stefank@2314 36
duke@435 37 class ParallelScavengeHeap;
duke@435 38 class PSAdaptiveSizePolicy;
duke@435 39 class PSYoungGen;
duke@435 40 class PSOldGen;
duke@435 41 class PSPermGen;
duke@435 42 class ParCompactionManager;
duke@435 43 class ParallelTaskTerminator;
duke@435 44 class PSParallelCompact;
duke@435 45 class GCTaskManager;
duke@435 46 class GCTaskQueue;
duke@435 47 class PreGCValues;
duke@435 48 class MoveAndUpdateClosure;
duke@435 49 class RefProcTaskExecutor;
duke@435 50
jcoomes@917 51 // The SplitInfo class holds the information needed to 'split' a source region
jcoomes@917 52 // so that the live data can be copied to two destination *spaces*. Normally,
jcoomes@917 53 // all the live data in a region is copied to a single destination space (e.g.,
jcoomes@917 54 // everything live in a region in eden is copied entirely into the old gen).
jcoomes@917 55 // However, when the heap is nearly full, all the live data in eden may not fit
jcoomes@917 56 // into the old gen. Copying only some of the regions from eden to old gen
jcoomes@917 57 // requires finding a region that does not contain a partial object (i.e., no
jcoomes@917 58 // live object crosses the region boundary) somewhere near the last object that
jcoomes@917 59 // does fit into the old gen. Since it's not always possible to find such a
jcoomes@917 60 // region, splitting is necessary for predictable behavior.
jcoomes@917 61 //
jcoomes@917 62 // A region is always split at the end of the partial object. This avoids
jcoomes@917 63 // additional tests when calculating the new location of a pointer, which is a
jcoomes@917 64 // very hot code path. The partial object and everything to its left will be
jcoomes@917 65 // copied to another space (call it dest_space_1). The live data to the right
jcoomes@917 66 // of the partial object will be copied either within the space itself, or to a
jcoomes@917 67 // different destination space (distinct from dest_space_1).
jcoomes@917 68 //
jcoomes@917 69 // Split points are identified during the summary phase, when region
jcoomes@917 70 // destinations are computed: data about the split, including the
jcoomes@917 71 // partial_object_size, is recorded in a SplitInfo record and the
jcoomes@917 72 // partial_object_size field in the summary data is set to zero. The zeroing is
jcoomes@917 73 // possible (and necessary) since the partial object will move to a different
jcoomes@917 74 // destination space than anything to its right, thus the partial object should
jcoomes@917 75 // not affect the locations of any objects to its right.
jcoomes@917 76 //
jcoomes@917 77 // The recorded data is used during the compaction phase, but only rarely: when
jcoomes@917 78 // the partial object on the split region will be copied across a destination
jcoomes@917 79 // region boundary. This test is made once each time a region is filled, and is
jcoomes@917 80 // a simple address comparison, so the overhead is negligible (see
jcoomes@917 81 // PSParallelCompact::first_src_addr()).
jcoomes@917 82 //
jcoomes@917 83 // Notes:
jcoomes@917 84 //
jcoomes@917 85 // Only regions with partial objects are split; a region without a partial
jcoomes@917 86 // object does not need any extra bookkeeping.
jcoomes@917 87 //
jcoomes@917 88 // At most one region is split per space, so the amount of data required is
jcoomes@917 89 // constant.
jcoomes@917 90 //
jcoomes@917 91 // A region is split only when the destination space would overflow. Once that
jcoomes@917 92 // happens, the destination space is abandoned and no other data (even from
jcoomes@917 93 // other source spaces) is targeted to that destination space. Abandoning the
jcoomes@917 94 // destination space may leave a somewhat large unused area at the end, if a
jcoomes@917 95 // large object caused the overflow.
jcoomes@917 96 //
jcoomes@917 97 // Future work:
jcoomes@917 98 //
jcoomes@917 99 // More bookkeeping would be required to continue to use the destination space.
jcoomes@917 100 // The most general solution would allow data from regions in two different
jcoomes@917 101 // source spaces to be "joined" in a single destination region. At the very
jcoomes@917 102 // least, additional code would be required in next_src_region() to detect the
jcoomes@917 103 // join and skip to an out-of-order source region. If the join region was also
jcoomes@917 104 // the last destination region to which a split region was copied (the most
jcoomes@917 105 // likely case), then additional work would be needed to get fill_region() to
jcoomes@917 106 // stop iteration and switch to a new source region at the right point. Basic
jcoomes@917 107 // idea would be to use a fake value for the top of the source space. It is
jcoomes@917 108 // doable, if a bit tricky.
jcoomes@917 109 //
jcoomes@917 110 // A simpler (but less general) solution would fill the remainder of the
jcoomes@917 111 // destination region with a dummy object and continue filling the next
jcoomes@917 112 // destination region.
jcoomes@917 113
jcoomes@917 114 class SplitInfo
jcoomes@917 115 {
jcoomes@917 116 public:
jcoomes@917 117 // Return true if this split info is valid (i.e., if a split has been
jcoomes@917 118 // recorded). The very first region cannot have a partial object and thus is
jcoomes@917 119 // never split, so 0 is the 'invalid' value.
jcoomes@917 120 bool is_valid() const { return _src_region_idx > 0; }
jcoomes@917 121
jcoomes@917 122 // Return true if this split holds data for the specified source region.
jcoomes@917 123 inline bool is_split(size_t source_region) const;
jcoomes@917 124
jcoomes@917 125 // The index of the split region, the size of the partial object on that
jcoomes@917 126 // region and the destination of the partial object.
jcoomes@917 127 size_t src_region_idx() const { return _src_region_idx; }
jcoomes@917 128 size_t partial_obj_size() const { return _partial_obj_size; }
jcoomes@917 129 HeapWord* destination() const { return _destination; }
jcoomes@917 130
jcoomes@917 131 // The destination count of the partial object referenced by this split
jcoomes@917 132 // (either 1 or 2). This must be added to the destination count of the
jcoomes@917 133 // remainder of the source region.
jcoomes@917 134 unsigned int destination_count() const { return _destination_count; }
jcoomes@917 135
jcoomes@917 136 // If a word within the partial object will be written to the first word of a
jcoomes@917 137 // destination region, this is the address of the destination region;
jcoomes@917 138 // otherwise this is NULL.
jcoomes@917 139 HeapWord* dest_region_addr() const { return _dest_region_addr; }
jcoomes@917 140
jcoomes@917 141 // If a word within the partial object will be written to the first word of a
jcoomes@917 142 // destination region, this is the address of that word within the partial
jcoomes@917 143 // object; otherwise this is NULL.
jcoomes@917 144 HeapWord* first_src_addr() const { return _first_src_addr; }
jcoomes@917 145
jcoomes@917 146 // Record the data necessary to split the region src_region_idx.
jcoomes@917 147 void record(size_t src_region_idx, size_t partial_obj_size,
jcoomes@917 148 HeapWord* destination);
jcoomes@917 149
jcoomes@917 150 void clear();
jcoomes@917 151
jcoomes@917 152 DEBUG_ONLY(void verify_clear();)
jcoomes@917 153
jcoomes@917 154 private:
jcoomes@917 155 size_t _src_region_idx;
jcoomes@917 156 size_t _partial_obj_size;
jcoomes@917 157 HeapWord* _destination;
jcoomes@917 158 unsigned int _destination_count;
jcoomes@917 159 HeapWord* _dest_region_addr;
jcoomes@917 160 HeapWord* _first_src_addr;
jcoomes@917 161 };
jcoomes@917 162
jcoomes@917 163 inline bool SplitInfo::is_split(size_t region_idx) const
jcoomes@917 164 {
jcoomes@917 165 return _src_region_idx == region_idx && is_valid();
jcoomes@917 166 }
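
// Illustrative sketch (not part of the original source) of how the summary
// and compaction phases might use SplitInfo; the locals 'sd', 'src_idx',
// 'dest_addr' and 'space_info' are hypothetical.
#if 0  // illustrative only; not compiled
  // Summary phase: the partial object on region src_idx is sent to a
  // different destination space than the live data to its right, so record
  // the split (the region's partial_obj_size in the summary data is then
  // zeroed, per the comments above).
  SplitInfo& split = space_info->split_info();
  split.record(src_idx, sd.region(src_idx)->partial_obj_size(), dest_addr);

  // Compaction phase: a single comparison decides whether the split affects
  // the source address for a destination region (see first_src_addr()).
  if (split.is_split(src_idx) && split.dest_region_addr() != NULL) {
    HeapWord* const first = split.first_src_addr();  // skip the partial obj
  }
#endif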
jcoomes@917 167
duke@435 168 class SpaceInfo
duke@435 169 {
duke@435 170 public:
duke@435 171 MutableSpace* space() const { return _space; }
duke@435 172
duke@435 173 // Where the free space will start after the collection. Valid only after the
duke@435 174 // summary phase completes.
duke@435 175 HeapWord* new_top() const { return _new_top; }
duke@435 176
duke@435 177 // Allows new_top to be set.
duke@435 178 HeapWord** new_top_addr() { return &_new_top; }
duke@435 179
duke@435 180 // Where the smallest allowable dense prefix ends (used only for perm gen).
duke@435 181 HeapWord* min_dense_prefix() const { return _min_dense_prefix; }
duke@435 182
duke@435 183 // Where the dense prefix ends, or the compacted region begins.
duke@435 184 HeapWord* dense_prefix() const { return _dense_prefix; }
duke@435 185
duke@435 186 // The start array for the (generation containing the) space, or NULL if there
duke@435 187 // is no start array.
duke@435 188 ObjectStartArray* start_array() const { return _start_array; }
duke@435 189
jcoomes@917 190 SplitInfo& split_info() { return _split_info; }
jcoomes@917 191
duke@435 192 void set_space(MutableSpace* s) { _space = s; }
duke@435 193 void set_new_top(HeapWord* addr) { _new_top = addr; }
duke@435 194 void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
duke@435 195 void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; }
duke@435 196 void set_start_array(ObjectStartArray* s) { _start_array = s; }
duke@435 197
jcoomes@917 198 void publish_new_top() const { _space->set_top(_new_top); }
jcoomes@917 199
duke@435 200 private:
duke@435 201 MutableSpace* _space;
duke@435 202 HeapWord* _new_top;
duke@435 203 HeapWord* _min_dense_prefix;
duke@435 204 HeapWord* _dense_prefix;
duke@435 205 ObjectStartArray* _start_array;
jcoomes@917 206 SplitInfo _split_info;
duke@435 207 };
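
// Illustrative sketch (assumed usage, not taken from this file): the summary
// phase writes the space's eventual top through new_top_addr(), and once
// compaction finishes the value is pushed back into the space.
#if 0  // illustrative only; not compiled
  SpaceInfo& info = _space_info[id];
  _summary_data.summarize(info.split_info(),
                          source_beg, source_end, &source_next,
                          info.space()->bottom(), info.space()->end(),
                          info.new_top_addr());  // fills in _new_top
  // ... compaction moves objects to their destinations ...
  info.publish_new_top();                        // _space->set_top(_new_top)
#endif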
duke@435 208
duke@435 209 class ParallelCompactData
duke@435 210 {
duke@435 211 public:
duke@435 212 // Sizes are in HeapWords, unless indicated otherwise.
jcoomes@810 213 static const size_t Log2RegionSize;
jcoomes@810 214 static const size_t RegionSize;
jcoomes@810 215 static const size_t RegionSizeBytes;
duke@435 216
jcoomes@810 217 // Mask for the bits in a size_t to get an offset within a region.
jcoomes@810 218 static const size_t RegionSizeOffsetMask;
jcoomes@810 219 // Mask for the bits in a pointer to get an offset within a region.
jcoomes@810 220 static const size_t RegionAddrOffsetMask;
jcoomes@810 221 // Mask for the bits in a pointer to get the address of the start of a region.
jcoomes@810 222 static const size_t RegionAddrMask;
duke@435 223
jcoomes@810 224 class RegionData
duke@435 225 {
duke@435 226 public:
jcoomes@810 227 // Destination address of the region.
duke@435 228 HeapWord* destination() const { return _destination; }
duke@435 229
jcoomes@810 230 // The first region containing data destined for this region.
jcoomes@810 231 size_t source_region() const { return _source_region; }
duke@435 232
jcoomes@810 233 // The object (if any) starting in this region and ending in a different
jcoomes@810 234 // region that could not be updated during the main (parallel) compaction
duke@435 235 // phase. This is different from _partial_obj_addr, which is an object that
jcoomes@810 236 // extends onto a source region. However, the two uses do not overlap in
duke@435 237 // time, so the same field is used to save space.
duke@435 238 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
duke@435 239
jcoomes@810 240 // The starting address of the partial object extending onto the region.
duke@435 241 HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
duke@435 242
jcoomes@810 243 // Size of the partial object extending onto the region (words).
duke@435 244 size_t partial_obj_size() const { return _partial_obj_size; }
duke@435 245
jcoomes@810 246 // Size of live data that lies within this region due to objects that start
jcoomes@810 247 // in this region (words). This does not include the partial object
jcoomes@810 248 // extending onto the region (if any), or the part of an object that extends
jcoomes@810 249 // onto the next region (if any).
duke@435 250 size_t live_obj_size() const { return _dc_and_los & los_mask; }
duke@435 251
jcoomes@810 252 // Total live data that lies within the region (words).
duke@435 253 size_t data_size() const { return partial_obj_size() + live_obj_size(); }
duke@435 254
jcoomes@810 255 // The destination_count is the number of other regions to which data from
jcoomes@810 256 // this region will be copied. At the end of the summary phase, the valid
duke@435 257 // values of destination_count are
duke@435 258 //
jcoomes@810 259 // 0 - data from the region will be compacted completely into itself, or the
jcoomes@810 260 // region is empty. The region can be claimed and then filled.
jcoomes@810 261 // 1 - data from the region will be compacted into 1 other region; some
jcoomes@810 262 // data from the region may also be compacted into the region itself.
jcoomes@810 263 // 2 - data from the region will be copied to 2 other regions.
duke@435 264 //
jcoomes@810 265 // During compaction as regions are emptied, the destination_count is
duke@435 266 // decremented (atomically) and when it reaches 0, it can be claimed and
duke@435 267 // then filled.
duke@435 268 //
jcoomes@810 269 // A region is claimed for processing by atomically changing the
jcoomes@810 270 // destination_count to the claimed value (dc_claimed). After a region has
duke@435 271 // been filled, the destination_count should be set to the completed value
duke@435 272 // (dc_completed).
duke@435 273 inline uint destination_count() const;
duke@435 274 inline uint destination_count_raw() const;
duke@435 275
jcoomes@810 276 // The location of the java heap data that corresponds to this region.
duke@435 277 inline HeapWord* data_location() const;
duke@435 278
jcoomes@810 279 // The highest address referenced by objects in this region.
duke@435 280 inline HeapWord* highest_ref() const;
duke@435 281
jcoomes@810 282 // Whether this region is available to be claimed, has been claimed, or has
duke@435 283 // been completed.
duke@435 284 //
jcoomes@810 285 // Minor subtlety: claimed() returns true if the region is marked
jcoomes@810 286 // completed(), which is desirable since a region must be claimed before it
duke@435 287 // can be completed.
duke@435 288 bool available() const { return _dc_and_los < dc_one; }
duke@435 289 bool claimed() const { return _dc_and_los >= dc_claimed; }
duke@435 290 bool completed() const { return _dc_and_los >= dc_completed; }
duke@435 291
duke@435 292 // These are not atomic.
duke@435 293 void set_destination(HeapWord* addr) { _destination = addr; }
jcoomes@810 294 void set_source_region(size_t region) { _source_region = region; }
duke@435 295 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
duke@435 296 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
duke@435 297 void set_partial_obj_size(size_t words) {
jcoomes@810 298 _partial_obj_size = (region_sz_t) words;
duke@435 299 }
duke@435 300
duke@435 301 inline void set_destination_count(uint count);
duke@435 302 inline void set_live_obj_size(size_t words);
duke@435 303 inline void set_data_location(HeapWord* addr);
duke@435 304 inline void set_completed();
duke@435 305 inline bool claim_unsafe();
duke@435 306
duke@435 307 // These are atomic.
duke@435 308 inline void add_live_obj(size_t words);
duke@435 309 inline void set_highest_ref(HeapWord* addr);
duke@435 310 inline void decrement_destination_count();
duke@435 311 inline bool claim();
duke@435 312
duke@435 313 private:
jcoomes@810 314 // The type used to represent object sizes within a region.
jcoomes@810 315 typedef uint region_sz_t;
duke@435 316
duke@435 317 // Constants for manipulating the _dc_and_los field, which holds both the
duke@435 318 // destination count and live obj size. The live obj size lives at the
duke@435 319 // least significant end so no masking is necessary when adding.
jcoomes@810 320 static const region_sz_t dc_shift; // Shift amount.
jcoomes@810 321 static const region_sz_t dc_mask; // Mask for destination count.
jcoomes@810 322 static const region_sz_t dc_one; // 1, shifted appropriately.
jcoomes@810 323 static const region_sz_t dc_claimed; // Region has been claimed.
jcoomes@810 324 static const region_sz_t dc_completed; // Region has been completed.
jcoomes@810 325 static const region_sz_t los_mask; // Mask for live obj size.
duke@435 326
jcoomes@810 327 HeapWord* _destination;
jcoomes@810 328 size_t _source_region;
jcoomes@810 329 HeapWord* _partial_obj_addr;
jcoomes@810 330 region_sz_t _partial_obj_size;
jcoomes@810 331 region_sz_t volatile _dc_and_los;
duke@435 332 #ifdef ASSERT
duke@435 333 // These enable optimizations that are only partially implemented. Use
duke@435 334 // debug builds to prevent the code fragments from breaking.
jcoomes@810 335 HeapWord* _data_location;
jcoomes@810 336 HeapWord* _highest_ref;
duke@435 337 #endif // #ifdef ASSERT
duke@435 338
duke@435 339 #ifdef ASSERT
duke@435 340 public:
jcoomes@810 341 uint _pushed; // 0 until region is pushed onto a worker's stack
duke@435 342 private:
duke@435 343 #endif
duke@435 344 };
duke@435 345
duke@435 346 public:
duke@435 347 ParallelCompactData();
duke@435 348 bool initialize(MemRegion covered_region);
duke@435 349
jcoomes@810 350 size_t region_count() const { return _region_count; }
duke@435 351
jcoomes@810 352 // Convert region indices to/from RegionData pointers.
jcoomes@810 353 inline RegionData* region(size_t region_idx) const;
jcoomes@810 354 inline size_t region(const RegionData* const region_ptr) const;
duke@435 355
jcoomes@810 356 // Returns true if the given address is contained within the region
jcoomes@810 357 bool region_contains(size_t region_index, HeapWord* addr);
duke@435 358
duke@435 359 void add_obj(HeapWord* addr, size_t len);
duke@435 360 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
duke@435 361
jcoomes@810 362 // Fill in the regions covering [beg, end) so that no data moves; i.e., the
jcoomes@810 363 // destination of region n is simply the start of region n. The argument beg
jcoomes@810 364 // must be region-aligned; end need not be.
duke@435 365 void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
duke@435 366
jcoomes@917 367 HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
jcoomes@917 368 HeapWord* destination, HeapWord* target_end,
jcoomes@917 369 HeapWord** target_next);
jcoomes@917 370 bool summarize(SplitInfo& split_info,
duke@435 371 HeapWord* source_beg, HeapWord* source_end,
jcoomes@917 372 HeapWord** source_next,
jcoomes@917 373 HeapWord* target_beg, HeapWord* target_end,
jcoomes@917 374 HeapWord** target_next);
duke@435 375
duke@435 376 void clear();
jcoomes@810 377 void clear_range(size_t beg_region, size_t end_region);
duke@435 378 void clear_range(HeapWord* beg, HeapWord* end) {
jcoomes@810 379 clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
duke@435 380 }
duke@435 381
jcoomes@810 382 // Return the number of words between addr and the start of the region
duke@435 383 // containing addr.
jcoomes@810 384 inline size_t region_offset(const HeapWord* addr) const;
duke@435 385
jcoomes@810 386 // Convert addresses to/from a region index or region pointer.
jcoomes@810 387 inline size_t addr_to_region_idx(const HeapWord* addr) const;
jcoomes@810 388 inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
jcoomes@810 389 inline HeapWord* region_to_addr(size_t region) const;
jcoomes@810 390 inline HeapWord* region_to_addr(size_t region, size_t offset) const;
jcoomes@810 391 inline HeapWord* region_to_addr(const RegionData* region) const;
duke@435 392
jcoomes@810 393 inline HeapWord* region_align_down(HeapWord* addr) const;
jcoomes@810 394 inline HeapWord* region_align_up(HeapWord* addr) const;
jcoomes@810 395 inline bool is_region_aligned(HeapWord* addr) const;
duke@435 396
duke@435 397 // Return the address one past the end of the partial object.
jcoomes@810 398 HeapWord* partial_obj_end(size_t region_idx) const;
duke@435 399
duke@435 400 // Return the new location of the object p after the
duke@435 401 // compaction.
duke@435 402 HeapWord* calc_new_pointer(HeapWord* addr);
duke@435 403
duke@435 404 HeapWord* calc_new_pointer(oop p) {
duke@435 405 return calc_new_pointer((HeapWord*) p);
duke@435 406 }
duke@435 407
duke@435 408 // Return the updated address for the given klass
duke@435 409 klassOop calc_new_klass(klassOop);
duke@435 410
duke@435 411 #ifdef ASSERT
duke@435 412 void verify_clear(const PSVirtualSpace* vspace);
duke@435 413 void verify_clear();
duke@435 414 #endif // #ifdef ASSERT
duke@435 415
duke@435 416 private:
jcoomes@810 417 bool initialize_region_data(size_t region_size);
duke@435 418 PSVirtualSpace* create_vspace(size_t count, size_t element_size);
duke@435 419
duke@435 420 private:
duke@435 421 HeapWord* _region_start;
duke@435 422 #ifdef ASSERT
duke@435 423 HeapWord* _region_end;
duke@435 424 #endif // #ifdef ASSERT
duke@435 425
jcoomes@810 426 PSVirtualSpace* _region_vspace;
jcoomes@810 427 RegionData* _region_data;
jcoomes@810 428 size_t _region_count;
duke@435 429 };
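
// Conceptual sketch of calc_new_pointer() (an assumption drawn from the
// comments above, not the real implementation): an object's new location is
// its region's destination plus the live words copied ahead of it.
#if 0  // illustrative only; 'live_words_between' is a hypothetical helper
  HeapWord* calc_new_pointer_sketch(const ParallelCompactData& sd,
                                    HeapWord* addr) {
    const size_t idx = sd.addr_to_region_idx(addr);
    const ParallelCompactData::RegionData* const r = sd.region(idx);
    // Words that precede addr in the region's destination: the partial
    // object (summarized in partial_obj_size()) plus live words between the
    // end of the partial object and addr.
    const size_t before = r->partial_obj_size() +
      live_words_between(sd.partial_obj_end(idx), addr);
    return r->destination() + before;
  }
#endif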
duke@435 430
duke@435 431 inline uint
jcoomes@810 432 ParallelCompactData::RegionData::destination_count_raw() const
duke@435 433 {
duke@435 434 return _dc_and_los & dc_mask;
duke@435 435 }
duke@435 436
duke@435 437 inline uint
jcoomes@810 438 ParallelCompactData::RegionData::destination_count() const
duke@435 439 {
duke@435 440 return destination_count_raw() >> dc_shift;
duke@435 441 }
duke@435 442
duke@435 443 inline void
jcoomes@810 444 ParallelCompactData::RegionData::set_destination_count(uint count)
duke@435 445 {
duke@435 446 assert(count <= (dc_completed >> dc_shift), "count too large");
jcoomes@810 447 const region_sz_t live_sz = (region_sz_t) live_obj_size();
duke@435 448 _dc_and_los = (count << dc_shift) | live_sz;
duke@435 449 }
duke@435 450
jcoomes@810 451 inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
duke@435 452 {
duke@435 453 assert(words <= los_mask, "would overflow");
jcoomes@810 454 _dc_and_los = destination_count_raw() | (region_sz_t)words;
duke@435 455 }
duke@435 456
jcoomes@810 457 inline void ParallelCompactData::RegionData::decrement_destination_count()
duke@435 458 {
duke@435 459 assert(_dc_and_los < dc_claimed, "already claimed");
duke@435 460 assert(_dc_and_los >= dc_one, "count would go negative");
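  // (Assumption: dc_mask is presumably ~0u << dc_shift, whose value as a
  // signed int is -dc_one, so the add below decrements the destination
  // count by one without disturbing the live-size bits.)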
duke@435 461 Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
duke@435 462 }
duke@435 463
jcoomes@810 464 inline HeapWord* ParallelCompactData::RegionData::data_location() const
duke@435 465 {
duke@435 466 DEBUG_ONLY(return _data_location;)
duke@435 467 NOT_DEBUG(return NULL;)
duke@435 468 }
duke@435 469
jcoomes@810 470 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
duke@435 471 {
duke@435 472 DEBUG_ONLY(return _highest_ref;)
duke@435 473 NOT_DEBUG(return NULL;)
duke@435 474 }
duke@435 475
jcoomes@810 476 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
duke@435 477 {
duke@435 478 DEBUG_ONLY(_data_location = addr;)
duke@435 479 }
duke@435 480
jcoomes@810 481 inline void ParallelCompactData::RegionData::set_completed()
duke@435 482 {
duke@435 483 assert(claimed(), "must be claimed first");
jcoomes@810 484 _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
duke@435 485 }
duke@435 486
jcoomes@810 487 // MT-unsafe claiming of a region. Should only be used during single threaded
duke@435 488 // execution.
jcoomes@810 489 inline bool ParallelCompactData::RegionData::claim_unsafe()
duke@435 490 {
duke@435 491 if (available()) {
duke@435 492 _dc_and_los |= dc_claimed;
duke@435 493 return true;
duke@435 494 }
duke@435 495 return false;
duke@435 496 }
duke@435 497
jcoomes@810 498 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
duke@435 499 {
duke@435 500 assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
duke@435 501 Atomic::add((int) words, (volatile int*) &_dc_and_los);
duke@435 502 }
duke@435 503
jcoomes@810 504 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
duke@435 505 {
duke@435 506 #ifdef ASSERT
duke@435 507 HeapWord* tmp = _highest_ref;
duke@435 508 while (addr > tmp) {
duke@435 509 tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
duke@435 510 }
duke@435 511 #endif // #ifdef ASSERT
duke@435 512 }
duke@435 513
jcoomes@810 514 inline bool ParallelCompactData::RegionData::claim()
duke@435 515 {
duke@435 516 const int los = (int) live_obj_size();
duke@435 517 const int old = Atomic::cmpxchg(dc_claimed | los,
duke@435 518 (volatile int*) &_dc_and_los, los);
duke@435 519 return old == los;
duke@435 520 }
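
// Illustrative sketch (not from this file) of the claim/fill protocol the
// comments above describe; 'fill_region' stands in for the real filling code.
#if 0  // illustrative only; not compiled
  void drain_region(ParallelCompactData& sd, size_t region_idx) {
    ParallelCompactData::RegionData* const r = sd.region(region_idx);
    if (r->claim()) {            // atomic: succeeds only while available()
      fill_region(region_idx);   // copy this region's incoming live data
      r->set_completed();        // claimed -> completed
    }
  }
#endif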
duke@435 521
jcoomes@810 522 inline ParallelCompactData::RegionData*
jcoomes@810 523 ParallelCompactData::region(size_t region_idx) const
duke@435 524 {
jcoomes@810 525 assert(region_idx <= region_count(), "bad arg");
jcoomes@810 526 return _region_data + region_idx;
duke@435 527 }
duke@435 528
duke@435 529 inline size_t
jcoomes@810 530 ParallelCompactData::region(const RegionData* const region_ptr) const
duke@435 531 {
jcoomes@810 532 assert(region_ptr >= _region_data, "bad arg");
jcoomes@810 533 assert(region_ptr <= _region_data + region_count(), "bad arg");
jcoomes@810 534 return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
duke@435 535 }
duke@435 536
duke@435 537 inline size_t
jcoomes@810 538 ParallelCompactData::region_offset(const HeapWord* addr) const
duke@435 539 {
duke@435 540 assert(addr >= _region_start, "bad addr");
duke@435 541 assert(addr <= _region_end, "bad addr");
jcoomes@810 542 return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
duke@435 543 }
duke@435 544
duke@435 545 inline size_t
jcoomes@810 546 ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
duke@435 547 {
duke@435 548 assert(addr >= _region_start, "bad addr");
duke@435 549 assert(addr <= _region_end, "bad addr");
jcoomes@810 550 return pointer_delta(addr, _region_start) >> Log2RegionSize;
duke@435 551 }
duke@435 552
jcoomes@810 553 inline ParallelCompactData::RegionData*
jcoomes@810 554 ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
duke@435 555 {
jcoomes@810 556 return region(addr_to_region_idx(addr));
duke@435 557 }
duke@435 558
duke@435 559 inline HeapWord*
jcoomes@810 560 ParallelCompactData::region_to_addr(size_t region) const
duke@435 561 {
jcoomes@810 562 assert(region <= _region_count, "region out of range");
jcoomes@810 563 return _region_start + (region << Log2RegionSize);
duke@435 564 }
duke@435 565
duke@435 566 inline HeapWord*
jcoomes@810 567 ParallelCompactData::region_to_addr(const RegionData* region) const
duke@435 568 {
jcoomes@810 569 return region_to_addr(pointer_delta(region, _region_data,
jcoomes@810 570 sizeof(RegionData)));
duke@435 571 }
duke@435 572
duke@435 573 inline HeapWord*
jcoomes@810 574 ParallelCompactData::region_to_addr(size_t region, size_t offset) const
duke@435 575 {
jcoomes@810 576 assert(region <= _region_count, "region out of range");
jcoomes@810 577 assert(offset < RegionSize, "offset too big"); // This may be too strict.
jcoomes@810 578 return region_to_addr(region) + offset;
duke@435 579 }
duke@435 580
duke@435 581 inline HeapWord*
jcoomes@810 582 ParallelCompactData::region_align_down(HeapWord* addr) const
duke@435 583 {
duke@435 584 assert(addr >= _region_start, "bad addr");
jcoomes@810 585 assert(addr < _region_end + RegionSize, "bad addr");
jcoomes@810 586 return (HeapWord*)(size_t(addr) & RegionAddrMask);
duke@435 587 }
duke@435 588
duke@435 589 inline HeapWord*
jcoomes@810 590 ParallelCompactData::region_align_up(HeapWord* addr) const
duke@435 591 {
duke@435 592 assert(addr >= _region_start, "bad addr");
duke@435 593 assert(addr <= _region_end, "bad addr");
jcoomes@810 594 return region_align_down(addr + RegionSizeOffsetMask);
duke@435 595 }
duke@435 596
duke@435 597 inline bool
jcoomes@810 598 ParallelCompactData::is_region_aligned(HeapWord* addr) const
duke@435 599 {
jcoomes@810 600 return region_offset(addr) == 0;
duke@435 601 }
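
// Worked example of the address arithmetic above, assuming (illustratively)
// Log2RegionSize == 9 (512-word regions) on a 64-bit VM (LogHeapWordSize == 3):
//   RegionSize           == 1 << 9          == 512 words
//   RegionSizeBytes      == 512 << 3        == 4096 bytes
//   RegionSizeOffsetMask == RegionSize - 1       (masks a word index)
//   RegionAddrOffsetMask == RegionSizeBytes - 1  (masks a byte address)
//   RegionAddrMask       == ~RegionAddrOffsetMask
// For an addr 100 words past the start of region 7:
//   region_offset(addr)      == 100
//   addr_to_region_idx(addr) == 7
//   region_align_down(addr)  == _region_start + 7 * RegionSize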
duke@435 602
duke@435 603 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
duke@435 604 // do_addr() method.
duke@435 605 //
duke@435 606 // The closure is initialized with the number of heap words to process
duke@435 607 // (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
duke@435 608 // methods in subclasses should update the total as words are processed. Since
duke@435 609 // only one subclass actually uses this mechanism to terminate iteration, the
duke@435 610 // default initial value is > 0. The implementation is here and not in the
duke@435 611 // single subclass that uses it to avoid making is_full() virtual, and thus
duke@435 612 // adding a virtual call per live object.
duke@435 613
duke@435 614 class ParMarkBitMapClosure: public StackObj {
duke@435 615 public:
duke@435 616 typedef ParMarkBitMap::idx_t idx_t;
duke@435 617 typedef ParMarkBitMap::IterationStatus IterationStatus;
duke@435 618
duke@435 619 public:
duke@435 620 inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
duke@435 621 size_t words = max_uintx);
duke@435 622
duke@435 623 inline ParCompactionManager* compaction_manager() const;
duke@435 624 inline ParMarkBitMap* bitmap() const;
duke@435 625 inline size_t words_remaining() const;
duke@435 626 inline bool is_full() const;
duke@435 627 inline HeapWord* source() const;
duke@435 628
duke@435 629 inline void set_source(HeapWord* addr);
duke@435 630
duke@435 631 virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;
duke@435 632
duke@435 633 protected:
duke@435 634 inline void decrement_words_remaining(size_t words);
duke@435 635
duke@435 636 private:
duke@435 637 ParMarkBitMap* const _bitmap;
duke@435 638 ParCompactionManager* const _compaction_manager;
duke@435 639 DEBUG_ONLY(const size_t _initial_words_remaining;) // Useful in debugger.
duke@435 640 size_t _words_remaining; // Words left to copy.
duke@435 641
duke@435 642 protected:
duke@435 643 HeapWord* _source; // Next addr that would be read.
duke@435 644 };
duke@435 645
duke@435 646 inline
duke@435 647 ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
duke@435 648 ParCompactionManager* cm,
duke@435 649 size_t words):
duke@435 650 _bitmap(bitmap), _compaction_manager(cm)
duke@435 651 #ifdef ASSERT
duke@435 652 , _initial_words_remaining(words)
duke@435 653 #endif
duke@435 654 {
duke@435 655 _words_remaining = words;
duke@435 656 _source = NULL;
duke@435 657 }
duke@435 658
duke@435 659 inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
duke@435 660 return _compaction_manager;
duke@435 661 }
duke@435 662
duke@435 663 inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
duke@435 664 return _bitmap;
duke@435 665 }
duke@435 666
duke@435 667 inline size_t ParMarkBitMapClosure::words_remaining() const {
duke@435 668 return _words_remaining;
duke@435 669 }
duke@435 670
duke@435 671 inline bool ParMarkBitMapClosure::is_full() const {
duke@435 672 return words_remaining() == 0;
duke@435 673 }
duke@435 674
duke@435 675 inline HeapWord* ParMarkBitMapClosure::source() const {
duke@435 676 return _source;
duke@435 677 }
duke@435 678
duke@435 679 inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
duke@435 680 _source = addr;
duke@435 681 }
duke@435 682
duke@435 683 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
duke@435 684 assert(_words_remaining >= words, "processed too many words");
duke@435 685 _words_remaining -= words;
duke@435 686 }
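
// Minimal sketch of a concrete closure (hypothetical; the real subclasses,
// e.g. MoveAndUpdateClosure, are declared elsewhere):
#if 0  // illustrative only; not compiled
  class CountWordsClosure : public ParMarkBitMapClosure {
  public:
    CountWordsClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                      size_t words)
      : ParMarkBitMapClosure(mbm, cm, words) { }

    virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
      decrement_words_remaining(words);  // closure becomes 'full' at zero
      set_source(addr + words);          // next address that would be read
      return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
    }
  };
#endif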
duke@435 687
jcoomes@810 688 // The UseParallelOldGC collector is a stop-the-world garbage collector that
jcoomes@810 689 // does parts of the collection using parallel threads. The collection includes
jcoomes@810 690 // the tenured generation and the young generation. The permanent generation is
jcoomes@810 691 // collected at the same time as the other two generations but the permanent
jcoomes@810 692 // generation is collected by a single GC thread. The permanent generation is
jcoomes@810 693 // collected serially because of the requirement that during the processing of a
jcoomes@810 694 // klass AAA, any objects referenced by AAA must already have been processed.
jcoomes@810 695 // This requirement is enforced by a left (lower address) to right (higher
jcoomes@810 696 // address) sliding compaction.
jmasa@698 697 //
jmasa@698 698 // There are four phases of the collection.
jmasa@698 699 //
jmasa@698 700 // - marking phase
jmasa@698 701 // - summary phase
jmasa@698 702 // - compacting phase
jmasa@698 703 // - clean up phase
jmasa@698 704 //
jmasa@698 705 // Roughly speaking these phases correspond, respectively, to
jmasa@698 706 // - mark all the live objects
jmasa@698 707 // - calculate the destination of each object at the end of the collection
jmasa@698 708 // - move the objects to their destination
jmasa@698 709 // - update some references and reinitialize some variables
jmasa@698 710 //
jcoomes@810 711 // The first three phases are invoked in PSParallelCompact::invoke_no_policy(). The
jcoomes@810 712 // marking phase is implemented in PSParallelCompact::marking_phase() and does a
jcoomes@810 713 // complete marking of the heap. The summary phase is implemented in
jcoomes@810 714 // PSParallelCompact::summary_phase(). The move and update phase is implemented
jcoomes@810 715 // in PSParallelCompact::compact().
jmasa@698 716 //
jcoomes@810 717 // A space that is being collected is divided into regions; each region has an
jcoomes@810 718 // associated RegionData entry in the ParallelCompactData table. Each region is
jcoomes@810 719 // of a fixed size and typically will contain more than 1 object and may have
jcoomes@810 720 // parts of objects at the front and back of the region.
jmasa@698 721 //
jcoomes@810 722 // region -----+---------------------+----------
jmasa@698 723 // objects covered [ AAA )[ BBB )[ CCC )[ DDD )
jmasa@698 724 //
jcoomes@810 725 // The marking phase does a complete marking of all live objects in the heap.
jcoomes@810 726 // The marking also compiles the size of the data for all live objects covered
jcoomes@810 727 // by the region. This size includes the part of any live object spanning onto
jcoomes@810 728 // the region (part of AAA if it is live) from the front, all live objects
jcoomes@810 729 // contained in the region (BBB and/or CCC if they are live), and the part of
jcoomes@810 730 // any live objects covered by the region that extends off the region (part of
jcoomes@810 731 // DDD if it is live). The marking phase uses multiple GC threads and marking
jcoomes@810 732 // is done in a bit array of type ParMarkBitMap. The marking of the bit map is
jcoomes@810 733 // done atomically as is the accumulation of the size of the live objects
jcoomes@810 734 // covered by a region.
jmasa@698 735 //
jcoomes@810 736 // The summary phase calculates the total live data to the left of each region
jcoomes@810 737 // XXX. Based on that total and the bottom of the space, it can calculate the
jcoomes@810 738 // starting location of the live data in XXX. The summary phase calculates for
jcoomes@810 739 // each region XXX quantities such as
jmasa@698 740 //
jcoomes@810 741 // - the amount of live data at the beginning of a region from an object
jcoomes@810 742 // entering the region.
jcoomes@810 743 // - the location of the first live data on the region
jcoomes@810 744 // - a count of the number of regions receiving live data from XXX.
jmasa@698 745 //
jmasa@698 746 // See ParallelCompactData for precise details. The summary phase also
jcoomes@810 747 // calculates the dense prefix for the compaction. The dense prefix is a
jcoomes@810 748 // portion at the beginning of the space that is not moved. The objects in the
jcoomes@810 749 // dense prefix do need to have their object references updated. See method
jcoomes@810 750 // summarize_dense_prefix().
jmasa@698 751 //
jmasa@698 752 // The summary phase is done using 1 GC thread.
jmasa@698 753 //
jcoomes@810 754 // The compaction phase moves objects to their new location and updates all
jcoomes@810 755 // references in the object.
jmasa@698 756 //
jcoomes@810 757 // A current exception is that objects that cross a region boundary are moved
jcoomes@810 758 // but do not have their references updated. References are not updated because
jcoomes@810 759 // it cannot easily be determined if the klass pointer KKK for the object AAA
jcoomes@810 760 // has been updated. KKK likely resides in a region to the left of the region
jcoomes@810 761 // containing AAA. These AAA's have their references updated at the end in a
jcoomes@810 762 // clean up phase. See the method PSParallelCompact::update_deferred_objects().
jcoomes@810 763 // An alternate strategy is being investigated for this deferral of updating.
jmasa@698 764 //
jcoomes@810 765 // Compaction is done on a region basis. A region that is ready to be filled is
jcoomes@810 766 // put on a ready list and GC threads take regions off the list and fill them. A
jcoomes@810 767 // region is ready to be filled if it is empty of live objects. Such a region may
jcoomes@810 768 // have been initially empty (only contained dead objects) or may have had all
jcoomes@810 769 // its live objects copied out already. A region that compacts into itself is
jcoomes@810 770 // also ready for filling. The ready list is initially filled with empty
jcoomes@810 771 // regions and regions compacting into themselves. There is always at least 1
jcoomes@810 772 // region that can be put on the ready list. The regions are atomically added
jcoomes@810 773 // and removed from the ready list.
jcoomes@810 774
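
// Rough sketch of the control flow described above (hypothetical; the real
// driver is PSParallelCompact::invoke_no_policy() in the .cpp file):
#if 0  // illustrative only; not compiled
  void invoke_no_policy_sketch(bool maximum_heap_compaction) {
    PreGCValues pre_gc_values;
    pre_compact(&pre_gc_values);
    ParCompactionManager* const cm = NULL;  // really the VM thread's manager
    marking_phase(cm, maximum_heap_compaction);  // mark all live objects
    summary_phase(cm, maximum_heap_compaction);  // compute destinations
    compact();                                   // move objects, update refs
    post_compact();                              // clean up phase
  }
#endif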
duke@435 775 class PSParallelCompact : AllStatic {
duke@435 776 public:
duke@435 777 // Convenient access to type names.
duke@435 778 typedef ParMarkBitMap::idx_t idx_t;
jcoomes@810 779 typedef ParallelCompactData::RegionData RegionData;
duke@435 780
duke@435 781 typedef enum {
duke@435 782 perm_space_id, old_space_id, eden_space_id,
duke@435 783 from_space_id, to_space_id, last_space_id
duke@435 784 } SpaceId;
duke@435 785
duke@435 786 public:
coleenp@548 787 // Inline closure decls
duke@435 788 //
duke@435 789 class IsAliveClosure: public BoolObjectClosure {
duke@435 790 public:
coleenp@548 791 virtual void do_object(oop p);
coleenp@548 792 virtual bool do_object_b(oop p);
duke@435 793 };
duke@435 794
duke@435 795 class KeepAliveClosure: public OopClosure {
coleenp@548 796 private:
coleenp@548 797 ParCompactionManager* _compaction_manager;
coleenp@548 798 protected:
coleenp@548 799 template <class T> inline void do_oop_work(T* p);
coleenp@548 800 public:
coleenp@548 801 KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 802 virtual void do_oop(oop* p);
coleenp@548 803 virtual void do_oop(narrowOop* p);
coleenp@548 804 };
coleenp@548 805
coleenp@548 806 // Currently unused
coleenp@548 807 class FollowRootClosure: public OopsInGenClosure {
coleenp@548 808 private:
duke@435 809 ParCompactionManager* _compaction_manager;
duke@435 810 public:
coleenp@548 811 FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 812 virtual void do_oop(oop* p);
coleenp@548 813 virtual void do_oop(narrowOop* p);
jrose@1424 814 };
duke@435 815
duke@435 816 class FollowStackClosure: public VoidClosure {
coleenp@548 817 private:
duke@435 818 ParCompactionManager* _compaction_manager;
duke@435 819 public:
coleenp@548 820 FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 821 virtual void do_void();
duke@435 822 };
duke@435 823
duke@435 824 class AdjustPointerClosure: public OopsInGenClosure {
coleenp@548 825 private:
duke@435 826 bool _is_root;
duke@435 827 public:
coleenp@548 828 AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
coleenp@548 829 virtual void do_oop(oop* p);
coleenp@548 830 virtual void do_oop(narrowOop* p);
jrose@1424 831 // do not walk from thread stacks to the code cache on this phase
jrose@1424 832 virtual void do_code_blob(CodeBlob* cb) const { }
duke@435 833 };
duke@435 834
duke@435 835 // Closure for verifying update of pointers. Does not
duke@435 836 // have any side effects.
duke@435 837 class VerifyUpdateClosure: public ParMarkBitMapClosure {
duke@435 838 const MutableSpace* _space; // Is this ever used?
duke@435 839
duke@435 840 public:
duke@435 841 VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
duke@435 842 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
duke@435 843 { }
duke@435 844
duke@435 845 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 846
duke@435 847 const MutableSpace* space() { return _space; }
duke@435 848 };
duke@435 849
duke@435 850 // Closure for updating objects altered for debug checking
duke@435 851 class ResetObjectsClosure: public ParMarkBitMapClosure {
duke@435 852 public:
duke@435 853 ResetObjectsClosure(ParCompactionManager* cm):
duke@435 854 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
duke@435 855 { }
duke@435 856
duke@435 857 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 858 };
duke@435 859
duke@435 860 friend class KeepAliveClosure;
duke@435 861 friend class FollowStackClosure;
duke@435 862 friend class AdjustPointerClosure;
duke@435 863 friend class FollowRootClosure;
duke@435 864 friend class instanceKlassKlass;
duke@435 865 friend class RefProcTaskProxy;
duke@435 866
duke@435 867 private:
duke@435 868 static elapsedTimer _accumulated_time;
duke@435 869 static unsigned int _total_invocations;
duke@435 870 static unsigned int _maximum_compaction_gc_num;
duke@435 871 static jlong _time_of_last_gc; // ms
duke@435 872 static CollectorCounters* _counters;
duke@435 873 static ParMarkBitMap _mark_bitmap;
duke@435 874 static ParallelCompactData _summary_data;
duke@435 875 static IsAliveClosure _is_alive_closure;
duke@435 876 static SpaceInfo _space_info[last_space_id];
duke@435 877 static bool _print_phases;
duke@435 878 static AdjustPointerClosure _adjust_root_pointer_closure;
duke@435 879 static AdjustPointerClosure _adjust_pointer_closure;
duke@435 880
duke@435 881 // Reference processing (used in ...follow_contents)
duke@435 882 static ReferenceProcessor* _ref_processor;
duke@435 883
duke@435 884 // Updated location of intArrayKlassObj.
duke@435 885 static klassOop _updated_int_array_klass_obj;
duke@435 886
duke@435 887 // Values computed at initialization and used by dead_wood_limiter().
duke@435 888 static double _dwl_mean;
duke@435 889 static double _dwl_std_dev;
duke@435 890 static double _dwl_first_term;
duke@435 891 static double _dwl_adjustment;
duke@435 892 #ifdef ASSERT
duke@435 893 static bool _dwl_initialized;
duke@435 894 #endif // #ifdef ASSERT
duke@435 895
duke@435 896 private:
duke@435 897 // Closure accessors
coleenp@548 898 static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
duke@435 899 static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
coleenp@548 900 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
duke@435 901
duke@435 902 static void initialize_space_info();
duke@435 903
duke@435 904 // Return true if details about individual phases should be printed.
duke@435 905 static inline bool print_phases();
duke@435 906
duke@435 907 // Clear the marking bitmap and summary data that cover the specified space.
duke@435 908 static void clear_data_covering_space(SpaceId id);
duke@435 909
duke@435 910 static void pre_compact(PreGCValues* pre_gc_values);
duke@435 911 static void post_compact();
duke@435 912
duke@435 913 // Mark live objects
duke@435 914 static void marking_phase(ParCompactionManager* cm,
duke@435 915 bool maximum_heap_compaction);
ysr@1376 916 static void follow_weak_klass_links();
ysr@1376 917 static void follow_mdo_weak_refs();
duke@435 918
coleenp@548 919 template <class T> static inline void adjust_pointer(T* p, bool is_root);
duke@435 920 static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
duke@435 921
coleenp@548 922 template <class T>
coleenp@548 923 static inline void follow_root(ParCompactionManager* cm, T* p);
duke@435 924
duke@435 925 // Compute the dense prefix for the designated space. This is an experimental
duke@435 926 // implementation currently not used in production.
duke@435 927 static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
duke@435 928 bool maximum_compaction);
duke@435 929
duke@435 930 // Methods used to compute the dense prefix.
duke@435 931
duke@435 932 // Compute the value of the normal distribution at x = density. The mean and
duke@435 933 // standard deviation are values saved by initialize_dead_wood_limiter().
duke@435 934 static inline double normal_distribution(double density);
duke@435 935
duke@435 936 // Initialize the static vars used by dead_wood_limiter().
duke@435 937 static void initialize_dead_wood_limiter();
duke@435 938
duke@435 939 // Return the percentage of space that can be treated as "dead wood" (i.e.,
duke@435 940 // not reclaimed).
duke@435 941 static double dead_wood_limiter(double density, size_t min_percent);
duke@435 942
jcoomes@810 943 // Find the first (left-most) region in the range [beg, end) that has at least
duke@435 944 // dead_words of dead space to the left. The argument beg must be the first
jcoomes@810 945 // region in the space that is not completely live.
jcoomes@810 946 static RegionData* dead_wood_limit_region(const RegionData* beg,
jcoomes@810 947 const RegionData* end,
jcoomes@810 948 size_t dead_words);
duke@435 949
jcoomes@810 950 // Return a pointer to the first region in the range [beg, end) that is not
duke@435 951 // completely full.
jcoomes@810 952 static RegionData* first_dead_space_region(const RegionData* beg,
jcoomes@810 953 const RegionData* end);
duke@435 954
duke@435 955 // Return a value indicating the benefit or 'yield' if the compacted region
duke@435 956 // were to start (or equivalently if the dense prefix were to end) at the
jcoomes@810 957 // candidate region. Higher values are better.
duke@435 958 //
duke@435 959 // The value is based on the amount of space reclaimed vs. the costs of (a)
duke@435 960 // updating references in the dense prefix plus (b) copying objects and
duke@435 961 // updating references in the compacted region.
jcoomes@810 962 static inline double reclaimed_ratio(const RegionData* const candidate,
duke@435 963 HeapWord* const bottom,
duke@435 964 HeapWord* const top,
duke@435 965 HeapWord* const new_top);
duke@435 966
duke@435 967 // Compute the dense prefix for the designated space.
duke@435 968 static HeapWord* compute_dense_prefix(const SpaceId id,
duke@435 969 bool maximum_compaction);
duke@435 970
jcoomes@810 971 // Return true if dead space crosses onto the specified Region; bit must be
jcoomes@810 972 // the bit index corresponding to the first word of the Region.
jcoomes@810 973 static inline bool dead_space_crosses_boundary(const RegionData* region,
duke@435 974 idx_t bit);
duke@435 975
duke@435 976 // Summary phase utility routine to fill dead space (if any) at the dense
duke@435 977 // prefix boundary. Should only be called if the dense prefix is
duke@435 978 // non-empty.
duke@435 979 static void fill_dense_prefix_end(SpaceId id);
duke@435 980
jcoomes@917 981 // Clear the summary data source_region field for the specified addresses.
jcoomes@917 982 static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
jcoomes@917 983
jcoomes@918 984 #ifndef PRODUCT
jcoomes@918 985 // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
jcoomes@918 986
jcoomes@918 987 // Fill the region [start, start + words) with live object(s). Only usable
jcoomes@918 988 // for the old and permanent generations.
jcoomes@918 989 static void fill_with_live_objects(SpaceId id, HeapWord* const start,
jcoomes@918 990 size_t words);
jcoomes@918 991 // Include the new objects in the summary data.
jcoomes@918 992 static void summarize_new_objects(SpaceId id, HeapWord* start);
jcoomes@918 993
jcoomes@931 994 // Add live objects to a survivor space since it's rare that both survivors
jcoomes@931 995 // are non-empty.
jcoomes@931 996 static void provoke_split_fill_survivor(SpaceId id);
jcoomes@931 997
jcoomes@918 998 // Add live objects and/or choose the dense prefix to provoke splitting.
jcoomes@918 999 static void provoke_split(bool & maximum_compaction);
jcoomes@918 1000 #endif
jcoomes@918 1001
duke@435 1002 static void summarize_spaces_quick();
duke@435 1003 static void summarize_space(SpaceId id, bool maximum_compaction);
duke@435 1004 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
duke@435 1005
duke@435 1006 // Adjust addresses in roots. Does not adjust addresses in heap.
duke@435 1007 static void adjust_roots();
duke@435 1008
duke@435 1009 // Serial code executed in preparation for the compaction phase.
duke@435 1010 static void compact_prologue();
duke@435 1011
duke@435 1012 // Move objects to new locations.
duke@435 1013 static void compact_perm(ParCompactionManager* cm);
duke@435 1014 static void compact();
duke@435 1015
jcoomes@810 1016 // Add available regions to the stack and draining tasks to the task queue.
jcoomes@810 1017 static void enqueue_region_draining_tasks(GCTaskQueue* q,
jcoomes@810 1018 uint parallel_gc_threads);
duke@435 1019
duke@435 1020 // Add dense prefix update tasks to the task queue.
duke@435 1021 static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 1022 uint parallel_gc_threads);
duke@435 1023
jcoomes@810 1024 // Add region stealing tasks to the task queue.
jcoomes@810 1025 static void enqueue_region_stealing_tasks(
duke@435 1026 GCTaskQueue* q,
duke@435 1027 ParallelTaskTerminator* terminator_ptr,
duke@435 1028 uint parallel_gc_threads);
duke@435 1029
duke@435 1030 // If objects are left in eden after a collection, try to move the boundary
duke@435 1031 // and absorb them into the old gen. Returns true if eden was emptied.
duke@435 1032 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 1033 PSYoungGen* young_gen,
duke@435 1034 PSOldGen* old_gen);
duke@435 1035
duke@435 1036 // Reset time since last full gc
duke@435 1037 static void reset_millis_since_last_gc();
duke@435 1038
duke@435 1039 protected:
duke@435 1040 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1041 static GrowableArray<void*>* _root_refs_stack;
duke@435 1042 static GrowableArray<oop> * _live_oops;
duke@435 1043 static GrowableArray<oop> * _live_oops_moved_to;
duke@435 1044 static GrowableArray<size_t>* _live_oops_size;
duke@435 1045 static size_t _live_oops_index;
duke@435 1046 static size_t _live_oops_index_at_perm;
coleenp@548 1047 static GrowableArray<void*>* _other_refs_stack;
coleenp@548 1048 static GrowableArray<void*>* _adjusted_pointers;
duke@435 1049 static bool _pointer_tracking;
duke@435 1050 static bool _root_tracking;
duke@435 1051
duke@435 1052 // The following arrays are saved since the time of the last GC and
duke@435 1053 // assist in tracking down problems where someone has done an errant
duke@435 1054 // store into the heap, usually to an oop that wasn't properly
duke@435 1055 // handleized across a GC. If we crash or otherwise fail before the
duke@435 1056 // next GC, we can query these arrays to find out the object we had
duke@435 1057 // intended to do the store to (assuming it is still alive) and the
duke@435 1058 // offset within that object. Covered under RecordMarkSweepCompaction.
duke@435 1059 static GrowableArray<HeapWord*> * _cur_gc_live_oops;
duke@435 1060 static GrowableArray<HeapWord*> * _cur_gc_live_oops_moved_to;
duke@435 1061 static GrowableArray<size_t>* _cur_gc_live_oops_size;
duke@435 1062 static GrowableArray<HeapWord*> * _last_gc_live_oops;
duke@435 1063 static GrowableArray<HeapWord*> * _last_gc_live_oops_moved_to;
duke@435 1064 static GrowableArray<size_t>* _last_gc_live_oops_size;
duke@435 1065 #endif
duke@435 1066
duke@435 1067 public:
duke@435 1068 class MarkAndPushClosure: public OopClosure {
coleenp@548 1069 private:
duke@435 1070 ParCompactionManager* _compaction_manager;
duke@435 1071 public:
coleenp@548 1072 MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 1073 virtual void do_oop(oop* p);
coleenp@548 1074 virtual void do_oop(narrowOop* p);
duke@435 1075 };
duke@435 1076
duke@435 1077 PSParallelCompact();
duke@435 1078
duke@435 1079 // Convenient accessor for Universe::heap().
duke@435 1080 static ParallelScavengeHeap* gc_heap() {
duke@435 1081 return (ParallelScavengeHeap*)Universe::heap();
duke@435 1082 }
duke@435 1083
duke@435 1084 static void invoke(bool maximum_heap_compaction);
duke@435 1085 static void invoke_no_policy(bool maximum_heap_compaction);
duke@435 1086
duke@435 1087 static void post_initialize();
duke@435 1088 // Perform initialization for PSParallelCompact that requires
duke@435 1089 // allocations. This should be called during the VM initialization
duke@435 1090 // at a point where it would be appropriate to return a JNI_ENOMEM
duke@435 1091 // in the event of a failure.
duke@435 1092 static bool initialize();
duke@435 1093
duke@435 1094 // Public accessors
duke@435 1095 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
duke@435 1096 static unsigned int total_invocations() { return _total_invocations; }
duke@435 1097 static CollectorCounters* counters() { return _counters; }
duke@435 1098
duke@435 1099 // Used to add tasks
duke@435 1100 static GCTaskManager* const gc_task_manager();
duke@435 1101 static klassOop updated_int_array_klass_obj() {
duke@435 1102 return _updated_int_array_klass_obj;
duke@435 1103 }
duke@435 1104
duke@435 1105 // Marking support
duke@435 1106 static inline bool mark_obj(oop obj);
coleenp@548 1107 // Check mark and maybe push on marking stack
coleenp@548 1108 template <class T> static inline void mark_and_push(ParCompactionManager* cm,
coleenp@548 1109 T* p);
duke@435 1110
duke@435 1111 // Compaction support.
duke@435 1112 // Return true if p is in the range [beg_addr, end_addr).
duke@435 1113 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
duke@435 1114 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
duke@435 1115
duke@435 1116 // Convenience wrappers for per-space data kept in _space_info.
duke@435 1117 static inline MutableSpace* space(SpaceId space_id);
duke@435 1118 static inline HeapWord* new_top(SpaceId space_id);
duke@435 1119 static inline HeapWord* dense_prefix(SpaceId space_id);
duke@435 1120 static inline ObjectStartArray* start_array(SpaceId space_id);
duke@435 1121
duke@435 1122   // Return true if the klass should be updated, i.e., it lies above the perm
duke@435 1122   // gen dense prefix and so may move.
duke@435 1123 static inline bool should_update_klass(klassOop k);
duke@435 1124
duke@435 1125 // Move and update the live objects in the specified space.
duke@435 1126 static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
duke@435 1127
jcoomes@810 1128 // Process the end of the given region range in the dense prefix.
duke@435 1129   // This includes saving (deferring) any object not yet updated.
jcoomes@810 1130 static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
jcoomes@810 1131 size_t region_start_index,
jcoomes@810 1132 size_t region_end_index,
jcoomes@810 1133 idx_t exiting_object_offset,
jcoomes@810 1134 idx_t region_offset_start,
jcoomes@810 1135 idx_t region_offset_end);
duke@435 1136
jcoomes@810 1137 // Update a region in the dense prefix. For each live object
jcoomes@810 1138   // in the region, update its interior references.  For each
duke@435 1139 // dead object, fill it with deadwood. Dead space at the end
jcoomes@810 1140 // of a region range will be filled to the start of the next
jcoomes@810 1141 // live object regardless of the region_index_end. None of the
duke@435 1142 // objects in the dense prefix move and dead space is dead
duke@435 1143 // (holds only dead objects that don't need any processing), so
duke@435 1144 // dead space can be filled in any order.
duke@435 1145 static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 1146 SpaceId space_id,
jcoomes@810 1147 size_t region_index_start,
jcoomes@810 1148 size_t region_index_end);
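  // A minimal sketch of the per-region work described above (illustrative
  // only; helper names are simplified, not the actual implementation):
  //
  //   for each live object obj in the region:    // found via the mark bitmap
  //     oop(obj)->update_contents(cm);           // adjust interior oops in place
  //   for each run of dead words [beg, end) in the region:
  //     CollectedHeap::fill_with_objects(beg, pointer_delta(end, beg));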
duke@435 1149
duke@435 1150   // Return the address of the (count + 1)st live word in the range [beg, end).
duke@435 1151 static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
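  // For example (illustrative): with count == 2, the first two live words in
  // [beg, end) are skipped and the address of the third live word is returned;
  // dead words in the range do not count toward the total.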
duke@435 1152
duke@435 1153 // Return the address of the word to be copied to dest_addr, which must be
jcoomes@810 1154 // aligned to a region boundary.
duke@435 1155 static HeapWord* first_src_addr(HeapWord* const dest_addr,
jcoomes@917 1156 SpaceId src_space_id,
jcoomes@810 1157 size_t src_region_idx);
duke@435 1158
jcoomes@810 1159   // Determine the next source region, set closure.source() to the start of the
jcoomes@810 1160   // new region and return the region index.  Parameter end_addr is the address one
duke@435 1161   // beyond the end of the source range just processed.  If necessary, switch to a
duke@435 1162 // new source space and set src_space_id (in-out parameter) and src_space_top
duke@435 1163 // (out parameter) accordingly.
jcoomes@810 1164 static size_t next_src_region(MoveAndUpdateClosure& closure,
jcoomes@810 1165 SpaceId& src_space_id,
jcoomes@810 1166 HeapWord*& src_space_top,
jcoomes@810 1167 HeapWord* end_addr);
duke@435 1168
jcoomes@810 1169 // Decrement the destination count for each non-empty source region in the
jcoomes@930 1170 // range [beg_region, region(region_align_up(end_addr))). If the destination
jcoomes@930 1171 // count for a region goes to 0 and it needs to be filled, enqueue it.
duke@435 1172 static void decrement_destination_counts(ParCompactionManager* cm,
jcoomes@930 1173 SpaceId src_space_id,
jcoomes@810 1174 size_t beg_region,
duke@435 1175 HeapWord* end_addr);
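  // The enqueue-when-zero protocol above amounts to the following sketch
  // (simplified; the decrement must be atomic since several GC threads may
  // finish sourcing from the same region concurrently):
  //
  //   for each non-empty source region idx in the range:
  //     atomically decrement the region's destination count;
  //     if the count is now zero and the region can be claimed for filling:
  //       cm->push_region(idx);    // an idle GC thread will fill it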
duke@435 1176
jcoomes@810 1177 // Fill a region, copying objects from one or more source regions.
jcoomes@810 1178 static void fill_region(ParCompactionManager* cm, size_t region_idx);
jcoomes@810 1179 static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
jcoomes@810 1180 fill_region(cm, region);
duke@435 1181 }
duke@435 1182
duke@435 1183 // Update the deferred objects in the space.
duke@435 1184 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
duke@435 1185
duke@435 1186 // Mark pointer and follow contents.
coleenp@548 1187 template <class T>
coleenp@548 1188 static inline void mark_and_follow(ParCompactionManager* cm, T* p);
duke@435 1189
duke@435 1190 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
duke@435 1191 static ParallelCompactData& summary_data() { return _summary_data; }
duke@435 1192
coleenp@548 1193 static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
coleenp@548 1194 static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
coleenp@548 1195
duke@435 1196 // Reference Processing
duke@435 1197 static ReferenceProcessor* const ref_processor() { return _ref_processor; }
duke@435 1198
duke@435 1199 // Return the SpaceId for the given address.
duke@435 1200 static SpaceId space_id(HeapWord* addr);
duke@435 1201
duke@435 1202 // Time since last full gc (in milliseconds).
duke@435 1203 static jlong millis_since_last_gc();
duke@435 1204
duke@435 1205 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1206 static void track_adjusted_pointer(void* p, bool isroot);
coleenp@548 1207 static void check_adjust_pointer(void* p);
duke@435 1208 static void track_interior_pointers(oop obj);
duke@435 1209 static void check_interior_pointers();
duke@435 1210
duke@435 1211 static void reset_live_oop_tracking(bool at_perm);
duke@435 1212 static void register_live_oop(oop p, size_t size);
duke@435 1213 static void validate_live_oop(oop p, size_t size);
duke@435 1214 static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
duke@435 1215 static void compaction_complete();
duke@435 1216
duke@435 1217   // Query operation for RecordMarkSweepCompaction results.
duke@435 1218 // Finds and prints the current base oop and offset for a word
duke@435 1219 // within an oop that was live during the last GC. Helpful for
duke@435 1220 // tracking down heap stomps.
duke@435 1221 static void print_new_location_of_heap_address(HeapWord* q);
duke@435 1222 #endif // #ifdef VALIDATE_MARK_SWEEP
duke@435 1223
duke@435 1224 // Call backs for class unloading
duke@435 1225 // Update subklass/sibling/implementor links at end of marking.
duke@435 1226 static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
duke@435 1227
ysr@1376 1228 // Clear unmarked oops in MDOs at the end of marking.
ysr@1376 1229 static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
ysr@1376 1230
duke@435 1231 #ifndef PRODUCT
duke@435 1232 // Debugging support.
duke@435 1233 static const char* space_names[last_space_id];
jcoomes@810 1234 static void print_region_ranges();
duke@435 1235 static void print_dense_prefix_stats(const char* const algorithm,
duke@435 1236 const SpaceId id,
duke@435 1237 const bool maximum_compaction,
duke@435 1238 HeapWord* const addr);
jcoomes@917 1239 static void summary_phase_msg(SpaceId dst_space_id,
jcoomes@917 1240 HeapWord* dst_beg, HeapWord* dst_end,
jcoomes@917 1241 SpaceId src_space_id,
jcoomes@917 1242 HeapWord* src_beg, HeapWord* src_end);
duke@435 1243 #endif // #ifndef PRODUCT
duke@435 1244
duke@435 1245 #ifdef ASSERT
jcoomes@930 1246 // Sanity check the new location of a word in the heap.
jcoomes@930 1247 static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
jcoomes@810 1248 // Verify that all the regions have been emptied.
duke@435 1249 static void verify_complete(SpaceId space_id);
duke@435 1250 #endif // #ifdef ASSERT
duke@435 1251 };
duke@435 1252
coleenp@548 1253 inline bool PSParallelCompact::mark_obj(oop obj) {
duke@435 1254 const int obj_size = obj->size();
duke@435 1255 if (mark_bitmap()->mark_obj(obj, obj_size)) {
duke@435 1256 _summary_data.add_obj(obj, obj_size);
duke@435 1257 return true;
duke@435 1258 } else {
duke@435 1259 return false;
duke@435 1260 }
duke@435 1261 }
duke@435 1262
coleenp@548 1263 template <class T>
coleenp@548 1264 inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
coleenp@548 1265 assert(!Universe::heap()->is_in_reserved(p),
coleenp@548 1266 "roots shouldn't be things within the heap");
coleenp@548 1267 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1268 if (ValidateMarkSweep) {
coleenp@548 1269 guarantee(!_root_refs_stack->contains(p), "should only be in here once");
coleenp@548 1270 _root_refs_stack->push(p);
coleenp@548 1271 }
coleenp@548 1272 #endif
coleenp@548 1273 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1274 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1275 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1276 if (mark_bitmap()->is_unmarked(obj)) {
coleenp@548 1277 if (mark_obj(obj)) {
coleenp@548 1278 obj->follow_contents(cm);
coleenp@548 1279 }
coleenp@548 1280 }
coleenp@548 1281 }
jcoomes@1746 1282 cm->follow_marking_stacks();
coleenp@548 1283 }
coleenp@548 1284
coleenp@548 1285 template <class T>
coleenp@548 1286 inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
coleenp@548 1287 T* p) {
coleenp@548 1288 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1289 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1290 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1291 if (mark_bitmap()->is_unmarked(obj)) {
coleenp@548 1292 if (mark_obj(obj)) {
coleenp@548 1293 obj->follow_contents(cm);
coleenp@548 1294 }
coleenp@548 1295 }
coleenp@548 1296 }
coleenp@548 1297 }
coleenp@548 1298
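// Unlike mark_and_follow() above, which traces a newly marked object
// immediately, mark_and_push() defers tracing by pushing the object on the
// compaction manager's marking stack.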
coleenp@548 1299 template <class T>
coleenp@548 1300 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
coleenp@548 1301 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1302 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1303 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
jcoomes@1993 1304 if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
jcoomes@1993 1305 cm->push(obj);
coleenp@548 1306 }
coleenp@548 1307 }
coleenp@548 1308 }
coleenp@548 1309
coleenp@548 1310 template <class T>
coleenp@548 1311 inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
coleenp@548 1312 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1313 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1314 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1315 oop new_obj = (oop)summary_data().calc_new_pointer(obj);
coleenp@548 1316 assert(new_obj != NULL || // is forwarding ptr?
coleenp@548 1317 obj->is_shared(), // never forwarded?
coleenp@548 1318 "should be forwarded");
coleenp@548 1319 // Just always do the update unconditionally?
coleenp@548 1320 if (new_obj != NULL) {
coleenp@548 1321 assert(Universe::heap()->is_in_reserved(new_obj),
coleenp@548 1322 "should be in object space");
coleenp@548 1323 oopDesc::encode_store_heap_oop_not_null(p, new_obj);
coleenp@548 1324 }
coleenp@548 1325 }
coleenp@548 1326 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
coleenp@548 1327 }
coleenp@548 1328
coleenp@548 1329 template <class T>
coleenp@548 1330 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
coleenp@548 1331 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1332 if (ValidateMarkSweep) {
coleenp@548 1333 if (!Universe::heap()->is_in_reserved(p)) {
coleenp@548 1334 _root_refs_stack->push(p);
coleenp@548 1335 } else {
coleenp@548 1336 _other_refs_stack->push(p);
coleenp@548 1337 }
coleenp@548 1338 }
coleenp@548 1339 #endif
coleenp@548 1340 mark_and_push(_compaction_manager, p);
coleenp@548 1341 }
coleenp@548 1342
coleenp@548 1343 inline bool PSParallelCompact::print_phases() {
duke@435 1344 return _print_phases;
duke@435 1345 }
duke@435 1346
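// The inline below evaluates a Gaussian (normal) density for the dead wood
// limiter: with mean m = _dwl_mean and standard deviation s = _dwl_std_dev,
//
//   f(density) = _dwl_first_term * exp(-0.5 * ((density - m) / s)^2)
//
// where _dwl_first_term presumably holds the usual 1 / (s * sqrt(2 * pi))
// normalization, computed once when the limiter is initialized.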
coleenp@548 1347 inline double PSParallelCompact::normal_distribution(double density) {
duke@435 1348 assert(_dwl_initialized, "uninitialized");
duke@435 1349 const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
duke@435 1350 return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
duke@435 1351 }
duke@435 1352
duke@435 1353 inline bool
jcoomes@810 1354 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
duke@435 1355 idx_t bit)
duke@435 1356 {
jcoomes@810 1357 assert(bit > 0, "cannot call this for the first bit/region");
jcoomes@810 1358 assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
duke@435 1359 "sanity check");
duke@435 1360
duke@435 1361 // Dead space crosses the boundary if (1) a partial object does not extend
jcoomes@810 1362 // onto the region, (2) an object does not start at the beginning of the
jcoomes@810 1363 // region, and (3) an object does not end at the end of the prior region.
jcoomes@810 1364 return region->partial_obj_size() == 0 &&
duke@435 1365 !_mark_bitmap.is_obj_beg(bit) &&
duke@435 1366 !_mark_bitmap.is_obj_end(bit - 1);
duke@435 1367 }
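// For example (illustrative): if one live object ends partway through region
// N-1 and the next live object begins partway through region N, the dead words
// between them cross the boundary; region N then has no partial object, no
// object begins at its first word and no object ends at the last word of
// region N-1, so the function returns true for region N's first bit.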
duke@435 1368
duke@435 1369 inline bool
duke@435 1370 PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
duke@435 1371 return p >= beg_addr && p < end_addr;
duke@435 1372 }
duke@435 1373
duke@435 1374 inline bool
duke@435 1375 PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
duke@435 1376 return is_in((HeapWord*)p, beg_addr, end_addr);
duke@435 1377 }
duke@435 1378
duke@435 1379 inline MutableSpace* PSParallelCompact::space(SpaceId id) {
duke@435 1380 assert(id < last_space_id, "id out of range");
duke@435 1381 return _space_info[id].space();
duke@435 1382 }
duke@435 1383
duke@435 1384 inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
duke@435 1385 assert(id < last_space_id, "id out of range");
duke@435 1386 return _space_info[id].new_top();
duke@435 1387 }
duke@435 1388
duke@435 1389 inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
duke@435 1390 assert(id < last_space_id, "id out of range");
duke@435 1391 return _space_info[id].dense_prefix();
duke@435 1392 }
duke@435 1393
duke@435 1394 inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
duke@435 1395 assert(id < last_space_id, "id out of range");
duke@435 1396 return _space_info[id].start_array();
duke@435 1397 }
duke@435 1398
duke@435 1399 inline bool PSParallelCompact::should_update_klass(klassOop k) {
duke@435 1400 return ((HeapWord*) k) >= dense_prefix(perm_space_id);
duke@435 1401 }
duke@435 1402
jcoomes@930 1403 #ifdef ASSERT
jcoomes@930 1404 inline void
jcoomes@930 1405 PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
jcoomes@930 1406 {
jcoomes@930 1407 assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
jcoomes@930 1408 "must move left or to a different space");
kvn@1926 1409 assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
kvn@1926 1410 "checking alignment");
jcoomes@930 1411 }
jcoomes@930 1412 #endif // ASSERT
jcoomes@930 1413
duke@435 1414 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
duke@435 1415 public:
duke@435 1416 inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
duke@435 1417 ObjectStartArray* start_array,
duke@435 1418 HeapWord* destination, size_t words);
duke@435 1419
duke@435 1420 // Accessors.
duke@435 1421 HeapWord* destination() const { return _destination; }
duke@435 1422
duke@435 1423 // If the object will fit (size <= words_remaining()), copy it to the current
duke@435 1424   // destination, update the interior oops and the start array, and return either
duke@435 1425 // full (if the closure is full) or incomplete. If the object will not fit,
duke@435 1426 // return would_overflow.
duke@435 1427 virtual IterationStatus do_addr(HeapWord* addr, size_t size);
duke@435 1428
duke@435 1429 // Copy enough words to fill this closure, starting at source(). Interior
duke@435 1430 // oops and the start array are not updated. Return full.
duke@435 1431 IterationStatus copy_until_full();
duke@435 1432
duke@435 1433 // Copy enough words to fill this closure or to the end of an object,
duke@435 1434 // whichever is smaller, starting at source(). Interior oops and the start
duke@435 1435 // array are not updated.
duke@435 1436 void copy_partial_obj();
duke@435 1437
duke@435 1438 protected:
duke@435 1439 // Update variables to indicate that word_count words were processed.
duke@435 1440 inline void update_state(size_t word_count);
duke@435 1441
duke@435 1442 protected:
duke@435 1443 ObjectStartArray* const _start_array;
duke@435 1444 HeapWord* _destination; // Next addr to be written.
duke@435 1445 };
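// A hedged sketch of a driver loop for this closure (simplified; in the real
// code the region-filling path, e.g. PSParallelCompact::fill_region(), plays
// this role):
//
//   MoveAndUpdateClosure closure(mark_bitmap(), cm, start_array(dst_space_id),
//                                dest_addr, words_to_fill);
//   set the closure's source from first_src_addr(dest_addr, src_space_id, src_region);
//   walk the mark bitmap over the source range, invoking do_addr() on each
//   live object, until the closure reports full or would_overflow; on
//   would_overflow, advance via next_src_region() and continue.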
duke@435 1446
duke@435 1447 inline
duke@435 1448 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
duke@435 1449 ParCompactionManager* cm,
duke@435 1450 ObjectStartArray* start_array,
duke@435 1451 HeapWord* destination,
duke@435 1452 size_t words) :
duke@435 1453 ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
duke@435 1454 {
duke@435 1455 _destination = destination;
duke@435 1456 }
duke@435 1457
duke@435 1458 inline void MoveAndUpdateClosure::update_state(size_t words)
duke@435 1459 {
duke@435 1460 decrement_words_remaining(words);
duke@435 1461 _source += words;
duke@435 1462 _destination += words;
duke@435 1463 }
duke@435 1464
duke@435 1465 class UpdateOnlyClosure: public ParMarkBitMapClosure {
duke@435 1466 private:
duke@435 1467 const PSParallelCompact::SpaceId _space_id;
duke@435 1468 ObjectStartArray* const _start_array;
duke@435 1469
duke@435 1470 public:
duke@435 1471 UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 1472 ParCompactionManager* cm,
duke@435 1473 PSParallelCompact::SpaceId space_id);
duke@435 1474
duke@435 1475 // Update the object.
duke@435 1476 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 1477
duke@435 1478 inline void do_addr(HeapWord* addr);
duke@435 1479 };
duke@435 1480
coleenp@548 1481 inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
coleenp@548 1482 {
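  // The object at addr is updated in place (it does not move); record its
  // start for later block lookups, then adjust its interior oops.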
duke@435 1483 _start_array->allocate_block(addr);
duke@435 1484 oop(addr)->update_contents(compaction_manager());
duke@435 1485 }
duke@435 1486
jcoomes@916 1487 class FillClosure: public ParMarkBitMapClosure
jcoomes@916 1488 {
jcoomes@916 1489 public:
coleenp@548 1490 FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
duke@435 1491 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
jcoomes@916 1492 _start_array(PSParallelCompact::start_array(space_id))
jcoomes@916 1493 {
jcoomes@916 1494 assert(space_id == PSParallelCompact::perm_space_id ||
jcoomes@916 1495 space_id == PSParallelCompact::old_space_id,
duke@435 1496 "cannot use FillClosure in the young gen");
duke@435 1497 }
duke@435 1498
duke@435 1499 virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
jcoomes@916 1500 CollectedHeap::fill_with_objects(addr, size);
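      // fill_with_objects() may have laid down more than one filler object to
      // cover [addr, addr + size), so register the start of each one.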
jcoomes@916 1501 HeapWord* const end = addr + size;
jcoomes@916 1502 do {
jcoomes@916 1503 _start_array->allocate_block(addr);
jcoomes@916 1504 addr += oop(addr)->size();
jcoomes@916 1505 } while (addr < end);
duke@435 1506 return ParMarkBitMap::incomplete;
duke@435 1507 }
duke@435 1508
duke@435 1509 private:
jcoomes@916 1510 ObjectStartArray* const _start_array;
duke@435 1511 };
stefank@2314 1512
stefank@2314 1513 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
