src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

author      trims
date        Tue, 07 Oct 2008 11:01:35 -0700
changeset   815:eb28cf662f56
parent      811:0166ac265d53
child       916:7d7a7c599c17
permissions -rw-r--r--

Merge

duke@435 1 /*
xdono@631 2 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 class ParallelScavengeHeap;
duke@435 26 class PSAdaptiveSizePolicy;
duke@435 27 class PSYoungGen;
duke@435 28 class PSOldGen;
duke@435 29 class PSPermGen;
duke@435 30 class ParCompactionManager;
duke@435 31 class ParallelTaskTerminator;
duke@435 32 class PSParallelCompact;
duke@435 33 class GCTaskManager;
duke@435 34 class GCTaskQueue;
duke@435 35 class PreGCValues;
duke@435 36 class MoveAndUpdateClosure;
duke@435 37 class RefProcTaskExecutor;
duke@435 38
duke@435 39 class SpaceInfo
duke@435 40 {
duke@435 41 public:
duke@435 42 MutableSpace* space() const { return _space; }
duke@435 43
duke@435 44 // Where the free space will start after the collection. Valid only after the
duke@435 45 // summary phase completes.
duke@435 46 HeapWord* new_top() const { return _new_top; }
duke@435 47
duke@435 48 // Allows new_top to be set.
duke@435 49 HeapWord** new_top_addr() { return &_new_top; }
duke@435 50
duke@435 51 // Where the smallest allowable dense prefix ends (used only for perm gen).
duke@435 52 HeapWord* min_dense_prefix() const { return _min_dense_prefix; }
duke@435 53
duke@435 54 // Where the dense prefix ends, or the compacted region begins.
duke@435 55 HeapWord* dense_prefix() const { return _dense_prefix; }
duke@435 56
duke@435 57 // The start array for the (generation containing the) space, or NULL if there
duke@435 58 // is no start array.
duke@435 59 ObjectStartArray* start_array() const { return _start_array; }
duke@435 60
duke@435 61 void set_space(MutableSpace* s) { _space = s; }
duke@435 62 void set_new_top(HeapWord* addr) { _new_top = addr; }
duke@435 63 void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
duke@435 64 void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; }
duke@435 65 void set_start_array(ObjectStartArray* s) { _start_array = s; }
duke@435 66
duke@435 67 private:
duke@435 68 MutableSpace* _space;
duke@435 69 HeapWord* _new_top;
duke@435 70 HeapWord* _min_dense_prefix;
duke@435 71 HeapWord* _dense_prefix;
duke@435 72 ObjectStartArray* _start_array;
duke@435 73 };
duke@435 74
duke@435 75 class ParallelCompactData
duke@435 76 {
duke@435 77 public:
duke@435 78 // Sizes are in HeapWords, unless indicated otherwise.
jcoomes@810 79 static const size_t Log2RegionSize;
jcoomes@810 80 static const size_t RegionSize;
jcoomes@810 81 static const size_t RegionSizeBytes;
duke@435 82
jcoomes@810 83 // Mask for the bits in a size_t to get an offset within a region.
jcoomes@810 84 static const size_t RegionSizeOffsetMask;
jcoomes@810 85 // Mask for the bits in a pointer to get an offset within a region.
jcoomes@810 86 static const size_t RegionAddrOffsetMask;
jcoomes@810 87 // Mask for the bits in a pointer to get the address of the start of a region.
jcoomes@810 88 static const size_t RegionAddrMask;
duke@435 89
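// Editorial illustration of the masks above (hedged: the actual constant
// values are assigned in the .cpp file, so the numbers here are assumptions).
// If Log2RegionSize were 9, a region would be 512 words, and for an address p
//
//   size_t off_words  = (size_t(p) & RegionAddrOffsetMask) >> LogHeapWordSize;
//   HeapWord* reg_beg = (HeapWord*)(size_t(p) & RegionAddrMask);
//
// which is exactly what region_offset() and region_align_down() below compute.
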
jcoomes@810 90 class RegionData
duke@435 91 {
duke@435 92 public:
jcoomes@810 93 // Destination address of the region.
duke@435 94 HeapWord* destination() const { return _destination; }
duke@435 95
jcoomes@810 96 // The first region containing data destined for this region.
jcoomes@810 97 size_t source_region() const { return _source_region; }
duke@435 98
jcoomes@810 99 // The object (if any) starting in this region and ending in a different
jcoomes@810 100 // region that could not be updated during the main (parallel) compaction
duke@435 101 // phase. This is different from _partial_obj_addr, which is an object that
jcoomes@810 102 // extends onto a source region. However, the two uses do not overlap in
duke@435 103 // time, so the same field is used to save space.
duke@435 104 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
duke@435 105
jcoomes@810 106 // The starting address of the partial object extending onto the region.
duke@435 107 HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
duke@435 108
jcoomes@810 109 // Size of the partial object extending onto the region (words).
duke@435 110 size_t partial_obj_size() const { return _partial_obj_size; }
duke@435 111
jcoomes@810 112 // Size of live data that lies within this region due to objects that start
jcoomes@810 113 // in this region (words). This does not include the partial object
jcoomes@810 114 // extending onto the region (if any), or the part of an object that extends
jcoomes@810 115 // onto the next region (if any).
duke@435 116 size_t live_obj_size() const { return _dc_and_los & los_mask; }
duke@435 117
jcoomes@810 118 // Total live data that lies within the region (words).
duke@435 119 size_t data_size() const { return partial_obj_size() + live_obj_size(); }
duke@435 120
jcoomes@810 121 // The destination_count is the number of other regions to which data from
jcoomes@810 122 // this region will be copied. At the end of the summary phase, the valid
duke@435 123 // values of destination_count are
duke@435 124 //
jcoomes@810 125 // 0 - data from the region will be compacted completely into itself, or the
jcoomes@810 126 // region is empty. The region can be claimed and then filled.
jcoomes@810 127 // 1 - data from the region will be compacted into 1 other region; some
jcoomes@810 128 // data from the region may also be compacted into the region itself.
jcoomes@810 129 // 2 - data from the region will be copied to 2 other regions.
duke@435 130 //
jcoomes@810 131 // During compaction, as regions are emptied, the destination_count is
duke@435 132 // decremented (atomically); when it reaches 0, the region can be claimed
duke@435 133 // and then filled.
duke@435 134 //
jcoomes@810 135 // A region is claimed for processing by atomically changing the
jcoomes@810 136 // destination_count to the claimed value (dc_claimed). After a region has
duke@435 137 // been filled, the destination_count should be set to the completed value
duke@435 138 // (dc_completed).
duke@435 139 inline uint destination_count() const;
duke@435 140 inline uint destination_count_raw() const;
duke@435 141
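// Editorial sketch of the protocol described above (hedged: sd and region_idx
// are illustrative names; the real consumer is the compaction code):
//
//   RegionData* const rd = sd.region(region_idx);
//   if (rd->claim()) {          // available -> claimed, atomically
//     // ... fill the region with live data from its source regions ...
//     rd->set_completed();      // claimed -> completed
//   }
//   // As this region's data is copied out to its destination(s), the copying
//   // thread calls rd->decrement_destination_count(); when the count reaches
//   // 0 the region holds no un-copied live data and is available() to claim.
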
jcoomes@810 142 // The location of the java heap data that corresponds to this region.
duke@435 143 inline HeapWord* data_location() const;
duke@435 144
jcoomes@810 145 // The highest address referenced by objects in this region.
duke@435 146 inline HeapWord* highest_ref() const;
duke@435 147
jcoomes@810 148 // Whether this region is available to be claimed, has been claimed, or has
duke@435 149 // been completed.
duke@435 150 //
jcoomes@810 151 // Minor subtlety: claimed() returns true if the region is marked
jcoomes@810 152 // completed(), which is desirable since a region must be claimed before it
duke@435 153 // can be completed.
duke@435 154 bool available() const { return _dc_and_los < dc_one; }
duke@435 155 bool claimed() const { return _dc_and_los >= dc_claimed; }
duke@435 156 bool completed() const { return _dc_and_los >= dc_completed; }
duke@435 157
duke@435 158 // These are not atomic.
duke@435 159 void set_destination(HeapWord* addr) { _destination = addr; }
jcoomes@810 160 void set_source_region(size_t region) { _source_region = region; }
duke@435 161 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
duke@435 162 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
duke@435 163 void set_partial_obj_size(size_t words) {
jcoomes@810 164 _partial_obj_size = (region_sz_t) words;
duke@435 165 }
duke@435 166
duke@435 167 inline void set_destination_count(uint count);
duke@435 168 inline void set_live_obj_size(size_t words);
duke@435 169 inline void set_data_location(HeapWord* addr);
duke@435 170 inline void set_completed();
duke@435 171 inline bool claim_unsafe();
duke@435 172
duke@435 173 // These are atomic.
duke@435 174 inline void add_live_obj(size_t words);
duke@435 175 inline void set_highest_ref(HeapWord* addr);
duke@435 176 inline void decrement_destination_count();
duke@435 177 inline bool claim();
duke@435 178
duke@435 179 private:
jcoomes@810 180 // The type used to represent object sizes within a region.
jcoomes@810 181 typedef uint region_sz_t;
duke@435 182
duke@435 183 // Constants for manipulating the _dc_and_los field, which holds both the
duke@435 184 // destination count and live obj size. The live obj size lives at the
duke@435 185 // least significant end so no masking is necessary when adding.
jcoomes@810 186 static const region_sz_t dc_shift; // Shift amount.
jcoomes@810 187 static const region_sz_t dc_mask; // Mask for destination count.
jcoomes@810 188 static const region_sz_t dc_one; // 1, shifted appropriately.
jcoomes@810 189 static const region_sz_t dc_claimed; // Region has been claimed.
jcoomes@810 190 static const region_sz_t dc_completed; // Region has been completed.
jcoomes@810 191 static const region_sz_t los_mask; // Mask for live obj size.
duke@435 192
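// Editorial illustration of the packing (hedged: the real shift/mask values
// are assigned in the .cpp file). With a 32-bit region_sz_t and dc_shift of,
// say, 27, bits [31:27] hold the destination count or claim state and bits
// [26:0] hold the live object size in words, so
//
//   _dc_and_los = (count << dc_shift) | (region_sz_t) live_obj_size();
//
// as set_destination_count() below does.
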
jcoomes@810 193 HeapWord* _destination;
jcoomes@810 194 size_t _source_region;
jcoomes@810 195 HeapWord* _partial_obj_addr;
jcoomes@810 196 region_sz_t _partial_obj_size;
jcoomes@810 197 region_sz_t volatile _dc_and_los;
duke@435 198 #ifdef ASSERT
duke@435 199 // These enable optimizations that are only partially implemented. Use
duke@435 200 // debug builds to prevent the code fragments from breaking.
jcoomes@810 201 HeapWord* _data_location;
jcoomes@810 202 HeapWord* _highest_ref;
duke@435 203 #endif // #ifdef ASSERT
duke@435 204
duke@435 205 #ifdef ASSERT
duke@435 206 public:
jcoomes@810 207 uint _pushed; // 0 until region is pushed onto a worker's stack
duke@435 208 private:
duke@435 209 #endif
duke@435 210 };
duke@435 211
duke@435 212 public:
duke@435 213 ParallelCompactData();
duke@435 214 bool initialize(MemRegion covered_region);
duke@435 215
jcoomes@810 216 size_t region_count() const { return _region_count; }
duke@435 217
jcoomes@810 218 // Convert region indices to/from RegionData pointers.
jcoomes@810 219 inline RegionData* region(size_t region_idx) const;
jcoomes@810 220 inline size_t region(const RegionData* const region_ptr) const;
duke@435 221
jcoomes@810 222 // Returns true if the given address is contained within the given region.
jcoomes@810 223 bool region_contains(size_t region_index, HeapWord* addr);
duke@435 224
duke@435 225 void add_obj(HeapWord* addr, size_t len);
duke@435 226 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
duke@435 227
jcoomes@810 228 // Fill in the regions covering [beg, end) so that no data moves; i.e., the
jcoomes@810 229 // destination of region n is simply the start of region n. The argument beg
jcoomes@810 230 // must be region-aligned; end need not be.
duke@435 231 void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
duke@435 232
duke@435 233 bool summarize(HeapWord* target_beg, HeapWord* target_end,
duke@435 234 HeapWord* source_beg, HeapWord* source_end,
duke@435 235 HeapWord** target_next, HeapWord** source_next = 0);
duke@435 236
duke@435 237 void clear();
jcoomes@810 238 void clear_range(size_t beg_region, size_t end_region);
duke@435 239 void clear_range(HeapWord* beg, HeapWord* end) {
jcoomes@810 240 clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
duke@435 241 }
duke@435 242
jcoomes@810 243 // Return the number of words between addr and the start of the region
duke@435 244 // containing addr.
jcoomes@810 245 inline size_t region_offset(const HeapWord* addr) const;
duke@435 246
jcoomes@810 247 // Convert addresses to/from a region index or region pointer.
jcoomes@810 248 inline size_t addr_to_region_idx(const HeapWord* addr) const;
jcoomes@810 249 inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
jcoomes@810 250 inline HeapWord* region_to_addr(size_t region) const;
jcoomes@810 251 inline HeapWord* region_to_addr(size_t region, size_t offset) const;
jcoomes@810 252 inline HeapWord* region_to_addr(const RegionData* region) const;
duke@435 253
jcoomes@810 254 inline HeapWord* region_align_down(HeapWord* addr) const;
jcoomes@810 255 inline HeapWord* region_align_up(HeapWord* addr) const;
jcoomes@810 256 inline bool is_region_aligned(HeapWord* addr) const;
duke@435 257
duke@435 258 // Return the address one past the end of the partial object.
jcoomes@810 259 HeapWord* partial_obj_end(size_t region_idx) const;
duke@435 260
duke@435 261 // Return the new location of the object p after the
duke@435 262 // compaction.
duke@435 263 HeapWord* calc_new_pointer(HeapWord* addr);
duke@435 264
duke@435 265 HeapWord* calc_new_pointer(oop p) {
duke@435 266 return calc_new_pointer((HeapWord*) p);
duke@435 267 }
duke@435 268
duke@435 269 // Return the updated address for the given klass
duke@435 270 klassOop calc_new_klass(klassOop);
duke@435 271
duke@435 272 #ifdef ASSERT
duke@435 273 void verify_clear(const PSVirtualSpace* vspace);
duke@435 274 void verify_clear();
duke@435 275 #endif // #ifdef ASSERT
duke@435 276
duke@435 277 private:
jcoomes@810 278 bool initialize_region_data(size_t region_size);
duke@435 279 PSVirtualSpace* create_vspace(size_t count, size_t element_size);
duke@435 280
duke@435 281 private:
duke@435 282 HeapWord* _region_start;
duke@435 283 #ifdef ASSERT
duke@435 284 HeapWord* _region_end;
duke@435 285 #endif // #ifdef ASSERT
duke@435 286
jcoomes@810 287 PSVirtualSpace* _region_vspace;
jcoomes@810 288 RegionData* _region_data;
jcoomes@810 289 size_t _region_count;
duke@435 290 };
duke@435 291
duke@435 292 inline uint
jcoomes@810 293 ParallelCompactData::RegionData::destination_count_raw() const
duke@435 294 {
duke@435 295 return _dc_and_los & dc_mask;
duke@435 296 }
duke@435 297
duke@435 298 inline uint
jcoomes@810 299 ParallelCompactData::RegionData::destination_count() const
duke@435 300 {
duke@435 301 return destination_count_raw() >> dc_shift;
duke@435 302 }
duke@435 303
duke@435 304 inline void
jcoomes@810 305 ParallelCompactData::RegionData::set_destination_count(uint count)
duke@435 306 {
duke@435 307 assert(count <= (dc_completed >> dc_shift), "count too large");
jcoomes@810 308 const region_sz_t live_sz = (region_sz_t) live_obj_size();
duke@435 309 _dc_and_los = (count << dc_shift) | live_sz;
duke@435 310 }
duke@435 311
jcoomes@810 312 inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
duke@435 313 {
duke@435 314 assert(words <= los_mask, "would overflow");
jcoomes@810 315 _dc_and_los = destination_count_raw() | (region_sz_t)words;
duke@435 316 }
duke@435 317
jcoomes@810 318 inline void ParallelCompactData::RegionData::decrement_destination_count()
duke@435 319 {
duke@435 320 assert(_dc_and_los < dc_claimed, "already claimed");
duke@435 321 assert(_dc_and_los >= dc_one, "count would go negative");
duke@435 322 Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
duke@435 323 }
duke@435 324
jcoomes@810 325 inline HeapWord* ParallelCompactData::RegionData::data_location() const
duke@435 326 {
duke@435 327 DEBUG_ONLY(return _data_location;)
duke@435 328 NOT_DEBUG(return NULL;)
duke@435 329 }
duke@435 330
jcoomes@810 331 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
duke@435 332 {
duke@435 333 DEBUG_ONLY(return _highest_ref;)
duke@435 334 NOT_DEBUG(return NULL;)
duke@435 335 }
duke@435 336
jcoomes@810 337 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
duke@435 338 {
duke@435 339 DEBUG_ONLY(_data_location = addr;)
duke@435 340 }
duke@435 341
jcoomes@810 342 inline void ParallelCompactData::RegionData::set_completed()
duke@435 343 {
duke@435 344 assert(claimed(), "must be claimed first");
jcoomes@810 345 _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
duke@435 346 }
duke@435 347
jcoomes@810 348 // MT-unsafe claiming of a region. Should only be used during single-threaded
duke@435 349 // execution.
jcoomes@810 350 inline bool ParallelCompactData::RegionData::claim_unsafe()
duke@435 351 {
duke@435 352 if (available()) {
duke@435 353 _dc_and_los |= dc_claimed;
duke@435 354 return true;
duke@435 355 }
duke@435 356 return false;
duke@435 357 }
duke@435 358
jcoomes@810 359 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
duke@435 360 {
duke@435 361 assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
duke@435 362 Atomic::add((int) words, (volatile int*) &_dc_and_los);
duke@435 363 }
duke@435 364
jcoomes@810 365 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
duke@435 366 {
duke@435 367 #ifdef ASSERT
duke@435 368 HeapWord* tmp = _highest_ref;
duke@435 369 while (addr > tmp) {
duke@435 370 tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
duke@435 371 }
duke@435 372 #endif // #ifdef ASSERT
duke@435 373 }
duke@435 374
jcoomes@810 375 inline bool ParallelCompactData::RegionData::claim()
duke@435 376 {
duke@435 377 const int los = (int) live_obj_size();
duke@435 378 const int old = Atomic::cmpxchg(dc_claimed | los,
duke@435 379 (volatile int*) &_dc_and_los, los);
duke@435 380 return old == los;
duke@435 381 }
duke@435 382
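// Editorial note on claim() above: the cmpxchg expects the raw field value
// with a destination count of zero (just the live size), so the exchange
// succeeds only while the region is still available(); a racing claimer sees
// old != los and backs off. Illustrative use (sd is an assumed name for a
// ParallelCompactData instance):
//
//   if (region_ptr->claim()) {
//     PSParallelCompact::fill_region(cm, sd.region(region_ptr));
//   }
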
jcoomes@810 383 inline ParallelCompactData::RegionData*
jcoomes@810 384 ParallelCompactData::region(size_t region_idx) const
duke@435 385 {
jcoomes@810 386 assert(region_idx <= region_count(), "bad arg");
jcoomes@810 387 return _region_data + region_idx;
duke@435 388 }
duke@435 389
duke@435 390 inline size_t
jcoomes@810 391 ParallelCompactData::region(const RegionData* const region_ptr) const
duke@435 392 {
jcoomes@810 393 assert(region_ptr >= _region_data, "bad arg");
jcoomes@810 394 assert(region_ptr <= _region_data + region_count(), "bad arg");
jcoomes@810 395 return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
duke@435 396 }
duke@435 397
duke@435 398 inline size_t
jcoomes@810 399 ParallelCompactData::region_offset(const HeapWord* addr) const
duke@435 400 {
duke@435 401 assert(addr >= _region_start, "bad addr");
duke@435 402 assert(addr <= _region_end, "bad addr");
jcoomes@810 403 return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
duke@435 404 }
duke@435 405
duke@435 406 inline size_t
jcoomes@810 407 ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
duke@435 408 {
duke@435 409 assert(addr >= _region_start, "bad addr");
duke@435 410 assert(addr <= _region_end, "bad addr");
jcoomes@810 411 return pointer_delta(addr, _region_start) >> Log2RegionSize;
duke@435 412 }
duke@435 413
jcoomes@810 414 inline ParallelCompactData::RegionData*
jcoomes@810 415 ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
duke@435 416 {
jcoomes@810 417 return region(addr_to_region_idx(addr));
duke@435 418 }
duke@435 419
duke@435 420 inline HeapWord*
jcoomes@810 421 ParallelCompactData::region_to_addr(size_t region) const
duke@435 422 {
jcoomes@810 423 assert(region <= _region_count, "region out of range");
jcoomes@810 424 return _region_start + (region << Log2RegionSize);
duke@435 425 }
duke@435 426
duke@435 427 inline HeapWord*
jcoomes@810 428 ParallelCompactData::region_to_addr(const RegionData* region) const
duke@435 429 {
jcoomes@810 430 return region_to_addr(pointer_delta(region, _region_data,
jcoomes@810 431 sizeof(RegionData)));
duke@435 432 }
duke@435 433
duke@435 434 inline HeapWord*
jcoomes@810 435 ParallelCompactData::region_to_addr(size_t region, size_t offset) const
duke@435 436 {
jcoomes@810 437 assert(region <= _region_count, "region out of range");
jcoomes@810 438 assert(offset < RegionSize, "offset too big"); // This may be too strict.
jcoomes@810 439 return region_to_addr(region) + offset;
duke@435 440 }
duke@435 441
duke@435 442 inline HeapWord*
jcoomes@810 443 ParallelCompactData::region_align_down(HeapWord* addr) const
duke@435 444 {
duke@435 445 assert(addr >= _region_start, "bad addr");
jcoomes@810 446 assert(addr < _region_end + RegionSize, "bad addr");
jcoomes@810 447 return (HeapWord*)(size_t(addr) & RegionAddrMask);
duke@435 448 }
duke@435 449
duke@435 450 inline HeapWord*
jcoomes@810 451 ParallelCompactData::region_align_up(HeapWord* addr) const
duke@435 452 {
duke@435 453 assert(addr >= _region_start, "bad addr");
duke@435 454 assert(addr <= _region_end, "bad addr");
jcoomes@810 455 return region_align_down(addr + RegionSizeOffsetMask);
duke@435 456 }
duke@435 457
duke@435 458 inline bool
jcoomes@810 459 ParallelCompactData::is_region_aligned(HeapWord* addr) const
duke@435 460 {
jcoomes@810 461 return region_offset(addr) == 0;
duke@435 462 }
duke@435 463
duke@435 464 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
duke@435 465 // do_addr() method.
duke@435 466 //
duke@435 467 // The closure is initialized with the number of heap words to process
duke@435 468 // (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
duke@435 469 // methods in subclasses should update the total as words are processed. Since
duke@435 470 // only one subclass actually uses this mechanism to terminate iteration, the
duke@435 471 // default initial value is > 0. The implementation is here and not in the
duke@435 472 // single subclass that uses it to avoid making is_full() virtual, and thus
duke@435 473 // adding a virtual call per live object.
duke@435 474
duke@435 475 class ParMarkBitMapClosure: public StackObj {
duke@435 476 public:
duke@435 477 typedef ParMarkBitMap::idx_t idx_t;
duke@435 478 typedef ParMarkBitMap::IterationStatus IterationStatus;
duke@435 479
duke@435 480 public:
duke@435 481 inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
duke@435 482 size_t words = max_uintx);
duke@435 483
duke@435 484 inline ParCompactionManager* compaction_manager() const;
duke@435 485 inline ParMarkBitMap* bitmap() const;
duke@435 486 inline size_t words_remaining() const;
duke@435 487 inline bool is_full() const;
duke@435 488 inline HeapWord* source() const;
duke@435 489
duke@435 490 inline void set_source(HeapWord* addr);
duke@435 491
duke@435 492 virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;
duke@435 493
duke@435 494 protected:
duke@435 495 inline void decrement_words_remaining(size_t words);
duke@435 496
duke@435 497 private:
duke@435 498 ParMarkBitMap* const _bitmap;
duke@435 499 ParCompactionManager* const _compaction_manager;
duke@435 500 DEBUG_ONLY(const size_t _initial_words_remaining;) // Useful in debugger.
duke@435 501 size_t _words_remaining; // Words left to copy.
duke@435 502
duke@435 503 protected:
duke@435 504 HeapWord* _source; // Next addr that would be read.
duke@435 505 };
duke@435 506
duke@435 507 inline
duke@435 508 ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
duke@435 509 ParCompactionManager* cm,
duke@435 510 size_t words):
duke@435 511 _bitmap(bitmap), _compaction_manager(cm)
duke@435 512 #ifdef ASSERT
duke@435 513 , _initial_words_remaining(words)
duke@435 514 #endif
duke@435 515 {
duke@435 516 _words_remaining = words;
duke@435 517 _source = NULL;
duke@435 518 }
duke@435 519
duke@435 520 inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
duke@435 521 return _compaction_manager;
duke@435 522 }
duke@435 523
duke@435 524 inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
duke@435 525 return _bitmap;
duke@435 526 }
duke@435 527
duke@435 528 inline size_t ParMarkBitMapClosure::words_remaining() const {
duke@435 529 return _words_remaining;
duke@435 530 }
duke@435 531
duke@435 532 inline bool ParMarkBitMapClosure::is_full() const {
duke@435 533 return words_remaining() == 0;
duke@435 534 }
duke@435 535
duke@435 536 inline HeapWord* ParMarkBitMapClosure::source() const {
duke@435 537 return _source;
duke@435 538 }
duke@435 539
duke@435 540 inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
duke@435 541 _source = addr;
duke@435 542 }
duke@435 543
duke@435 544 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
duke@435 545 assert(_words_remaining >= words, "processed too many words");
duke@435 546 _words_remaining -= words;
duke@435 547 }
duke@435 548
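// Editorial sketch of a subclass (hedged: ExampleClosure is hypothetical, and
// the IterationStatus values are assumed from ParMarkBitMap; the real
// subclasses live in the collector sources). do_addr() processes one live
// object and debits the word budget so that is_full() can stop iteration:
//
//   class ExampleClosure: public ParMarkBitMapClosure {
//   public:
//     ExampleClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
//                    size_t words):
//       ParMarkBitMapClosure(mbm, cm, words) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       // ... copy or update the object at addr ...
//       decrement_words_remaining(words);
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };
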
jcoomes@810 549 // The UseParallelOldGC collector is a stop-the-world garbage collector that
jcoomes@810 550 // does parts of the collection using parallel threads. The collection includes
jcoomes@810 551 // the tenured generation and the young generation. The permanent generation is
jcoomes@810 552 // collected at the same time as the other two generations, but the permanent
jcoomes@810 553 // generation is collected by a single GC thread. The permanent generation is
jcoomes@810 554 // collected serially because of the requirement that during the processing of a
jcoomes@810 555 // klass AAA, any objects referenced by AAA must already have been processed.
jcoomes@810 556 // This requirement is enforced by a left (lower address) to right (higher
jcoomes@810 557 // address) sliding compaction.
jmasa@698 558 //
jmasa@698 559 // There are four phases of the collection.
jmasa@698 560 //
jmasa@698 561 // - marking phase
jmasa@698 562 // - summary phase
jmasa@698 563 // - compacting phase
jmasa@698 564 // - clean up phase
jmasa@698 565 //
jmasa@698 566 // Roughly speaking these phases correspond, respectively, to
jmasa@698 567 // - mark all the live objects
jmasa@698 568 // - calculate the destination of each object at the end of the collection
jmasa@698 569 // - move the objects to their destination
jmasa@698 570 // - update some references and reinitialize some variables
jmasa@698 571 //
jcoomes@810 572 // These phases are invoked in PSParallelCompact::invoke_no_policy(). The
jcoomes@810 573 // marking phase is implemented in PSParallelCompact::marking_phase() and does a
jcoomes@810 574 // complete marking of the heap. The summary phase is implemented in
jcoomes@810 575 // PSParallelCompact::summary_phase(). The move and update phase is implemented
jcoomes@810 576 // in PSParallelCompact::compact().
jmasa@698 577 //
jcoomes@810 578 // A space that is being collected is divided into regions; with each region
jcoomes@810 579 // is associated an object of type ParallelCompactData::RegionData. Each region
jcoomes@810 580 // is of a fixed size and typically will contain more than 1 object and may have
jcoomes@810 581 // parts of objects at the front and back of the region.
jmasa@698 582 //
jcoomes@810 583 // region            -----+---------------------+----------
jmasa@698 584 // objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
jmasa@698 585 //
jcoomes@810 586 // The marking phase does a complete marking of all live objects in the heap.
jcoomes@810 587 // The marking also accumulates the size of the data for all live objects covered
jcoomes@810 588 // by each region. This size includes the part of any live object spanning onto
jcoomes@810 589 // the region (part of AAA if it is live) from the front, all live objects
jcoomes@810 590 // contained in the region (BBB and/or CCC if they are live), and the part of
jcoomes@810 591 // any live objects covered by the region that extends off the region (part of
jcoomes@810 592 // DDD if it is live). The marking phase uses multiple GC threads and marking
jcoomes@810 593 // is done in a bit array of type ParMarkBitMap. The marking of the bit map is
jcoomes@810 594 // done atomically as is the accumulation of the size of the live objects
jcoomes@810 595 // covered by a region.
jmasa@698 596 //
jcoomes@810 597 // The summary phase calculates the total live data to the left of each region
jcoomes@810 598 // XXX. Based on that total and the bottom of the space, it can calculate the
jcoomes@810 599 // starting location of the live data in XXX. The summary phase calculates for
jcoomes@810 600 // each region XXX quantities such as
jmasa@698 601 //
jcoomes@810 602 // - the amount of live data at the beginning of a region from an object
jcoomes@810 603 // entering the region.
jcoomes@810 604 // - the location of the first live data in the region.
jcoomes@810 605 // - a count of the number of regions receiving live data from XXX.
jmasa@698 606 //
jmasa@698 607 // See ParallelCompactData for precise details. The summary phase also
jcoomes@810 608 // calculates the dense prefix for the compaction. The dense prefix is a
jcoomes@810 609 // portion at the beginning of the space that is not moved. The objects in the
jcoomes@810 610 // dense prefix do need to have their object references updated. See method
jcoomes@810 611 // summarize_dense_prefix().
jmasa@698 612 //
jmasa@698 613 // The summary phase is done using 1 GC thread.
jmasa@698 614 //
jcoomes@810 615 // The compaction phase moves objects to their new location and updates all
jcoomes@810 616 // references in the object.
jmasa@698 617 //
jcoomes@810 618 // A current exception is that objects that cross a region boundary are moved
jcoomes@810 619 // but do not have their references updated. References are not updated because
jcoomes@810 620 // it cannot easily be determined if the klass pointer KKK for the object AAA
jcoomes@810 621 // has been updated. KKK likely resides in a region to the left of the region
jcoomes@810 622 // containing AAA. These AAA's have their references updated at the end in a
jcoomes@810 623 // clean up phase. See the method PSParallelCompact::update_deferred_objects().
jcoomes@810 624 // An alternate strategy is being investigated for this deferral of updating.
jmasa@698 625 //
jcoomes@810 626 // Compaction is done on a region basis. A region that is ready to be filled is
jcoomes@810 627 // put on a ready list and GC threads take regions off the list and fill them. A
jcoomes@810 628 // region is ready to be filled if it is empty of live objects. Such a region may
jcoomes@810 629 // have been initially empty (only contained dead objects) or may have had all
jcoomes@810 630 // its live objects copied out already. A region that compacts into itself is
jcoomes@810 631 // also ready for filling. The ready list is initially filled with empty
jcoomes@810 632 // regions and regions compacting into themselves. There is always at least 1
jcoomes@810 633 // region that can be put on the ready list. Regions are atomically added to
jcoomes@810 634 // and removed from the ready list.
jcoomes@810 635
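// Editorial sketch of the per-thread drain loop this implies (hedged: the
// region_stack()/pop_local() names are assumptions; see
// enqueue_region_draining_tasks() and enqueue_region_stealing_tasks() below
// for the real entry points):
//
//   size_t region_idx;
//   while (cm->region_stack()->pop_local(region_idx)) {
//     PSParallelCompact::fill_and_update_region(cm, region_idx);
//   }
//   // ... then steal regions from other threads' stacks until the
//   // ParallelTaskTerminator offers termination.
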
duke@435 636 class PSParallelCompact : AllStatic {
duke@435 637 public:
duke@435 638 // Convenient access to type names.
duke@435 639 typedef ParMarkBitMap::idx_t idx_t;
jcoomes@810 640 typedef ParallelCompactData::RegionData RegionData;
duke@435 641
duke@435 642 typedef enum {
duke@435 643 perm_space_id, old_space_id, eden_space_id,
duke@435 644 from_space_id, to_space_id, last_space_id
duke@435 645 } SpaceId;
duke@435 646
duke@435 647 public:
coleenp@548 648 // Inline closure decls
duke@435 649 //
duke@435 650 class IsAliveClosure: public BoolObjectClosure {
duke@435 651 public:
coleenp@548 652 virtual void do_object(oop p);
coleenp@548 653 virtual bool do_object_b(oop p);
duke@435 654 };
duke@435 655
duke@435 656 class KeepAliveClosure: public OopClosure {
coleenp@548 657 private:
coleenp@548 658 ParCompactionManager* _compaction_manager;
coleenp@548 659 protected:
coleenp@548 660 template <class T> inline void do_oop_work(T* p);
coleenp@548 661 public:
coleenp@548 662 KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 663 virtual void do_oop(oop* p);
coleenp@548 664 virtual void do_oop(narrowOop* p);
coleenp@548 665 };
coleenp@548 666
coleenp@548 667 // Currently unused
coleenp@548 668 class FollowRootClosure: public OopsInGenClosure {
coleenp@548 669 private:
duke@435 670 ParCompactionManager* _compaction_manager;
duke@435 671 public:
coleenp@548 672 FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 673 virtual void do_oop(oop* p);
coleenp@548 674 virtual void do_oop(narrowOop* p);
duke@435 675 virtual const bool do_nmethods() const { return true; }
duke@435 676 };
duke@435 677
duke@435 678 class FollowStackClosure: public VoidClosure {
coleenp@548 679 private:
duke@435 680 ParCompactionManager* _compaction_manager;
duke@435 681 public:
coleenp@548 682 FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 683 virtual void do_void();
duke@435 684 };
duke@435 685
duke@435 686 class AdjustPointerClosure: public OopsInGenClosure {
coleenp@548 687 private:
duke@435 688 bool _is_root;
duke@435 689 public:
coleenp@548 690 AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
coleenp@548 691 virtual void do_oop(oop* p);
coleenp@548 692 virtual void do_oop(narrowOop* p);
duke@435 693 };
duke@435 694
duke@435 695 // Closure for verifying update of pointers. Does not
duke@435 696 // have any side effects.
duke@435 697 class VerifyUpdateClosure: public ParMarkBitMapClosure {
duke@435 698 const MutableSpace* _space; // Is this ever used?
duke@435 699
duke@435 700 public:
duke@435 701 VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
duke@435 702 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
duke@435 703 { }
duke@435 704
duke@435 705 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 706
duke@435 707 const MutableSpace* space() { return _space; }
duke@435 708 };
duke@435 709
duke@435 710 // Closure for updating objects altered for debug checking
duke@435 711 class ResetObjectsClosure: public ParMarkBitMapClosure {
duke@435 712 public:
duke@435 713 ResetObjectsClosure(ParCompactionManager* cm):
duke@435 714 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
duke@435 715 { }
duke@435 716
duke@435 717 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 718 };
duke@435 719
duke@435 720 friend class KeepAliveClosure;
duke@435 721 friend class FollowStackClosure;
duke@435 722 friend class AdjustPointerClosure;
duke@435 723 friend class FollowRootClosure;
duke@435 724 friend class instanceKlassKlass;
duke@435 725 friend class RefProcTaskProxy;
duke@435 726
duke@435 727 private:
duke@435 728 static elapsedTimer _accumulated_time;
duke@435 729 static unsigned int _total_invocations;
duke@435 730 static unsigned int _maximum_compaction_gc_num;
duke@435 731 static jlong _time_of_last_gc; // ms
duke@435 732 static CollectorCounters* _counters;
duke@435 733 static ParMarkBitMap _mark_bitmap;
duke@435 734 static ParallelCompactData _summary_data;
duke@435 735 static IsAliveClosure _is_alive_closure;
duke@435 736 static SpaceInfo _space_info[last_space_id];
duke@435 737 static bool _print_phases;
duke@435 738 static AdjustPointerClosure _adjust_root_pointer_closure;
duke@435 739 static AdjustPointerClosure _adjust_pointer_closure;
duke@435 740
duke@435 741 // Reference processing (used in ...follow_contents)
duke@435 742 static ReferenceProcessor* _ref_processor;
duke@435 743
duke@435 744 // Updated location of intArrayKlassObj.
duke@435 745 static klassOop _updated_int_array_klass_obj;
duke@435 746
duke@435 747 // Values computed at initialization and used by dead_wood_limiter().
duke@435 748 static double _dwl_mean;
duke@435 749 static double _dwl_std_dev;
duke@435 750 static double _dwl_first_term;
duke@435 751 static double _dwl_adjustment;
duke@435 752 #ifdef ASSERT
duke@435 753 static bool _dwl_initialized;
duke@435 754 #endif // #ifdef ASSERT
duke@435 755
duke@435 756 private:
duke@435 757 // Closure accessors
coleenp@548 758 static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
duke@435 759 static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
coleenp@548 760 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
duke@435 761
duke@435 762 static void initialize_space_info();
duke@435 763
duke@435 764 // Return true if details about individual phases should be printed.
duke@435 765 static inline bool print_phases();
duke@435 766
duke@435 767 // Clear the marking bitmap and summary data that cover the specified space.
duke@435 768 static void clear_data_covering_space(SpaceId id);
duke@435 769
duke@435 770 static void pre_compact(PreGCValues* pre_gc_values);
duke@435 771 static void post_compact();
duke@435 772
duke@435 773 // Mark live objects
duke@435 774 static void marking_phase(ParCompactionManager* cm,
duke@435 775 bool maximum_heap_compaction);
duke@435 776 static void follow_stack(ParCompactionManager* cm);
duke@435 777 static void follow_weak_klass_links(ParCompactionManager* cm);
duke@435 778
coleenp@548 779 template <class T> static inline void adjust_pointer(T* p, bool is_root);
duke@435 780 static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
duke@435 781
coleenp@548 782 template <class T>
coleenp@548 783 static inline void follow_root(ParCompactionManager* cm, T* p);
duke@435 784
duke@435 785 // Compute the dense prefix for the designated space. This is an experimental
duke@435 786 // implementation currently not used in production.
duke@435 787 static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
duke@435 788 bool maximum_compaction);
duke@435 789
duke@435 790 // Methods used to compute the dense prefix.
duke@435 791
duke@435 792 // Compute the value of the normal distribution at x = density. The mean and
duke@435 793 // standard deviation are values saved by initialize_dead_wood_limiter().
duke@435 794 static inline double normal_distribution(double density);
duke@435 795
duke@435 796 // Initialize the static vars used by dead_wood_limiter().
duke@435 797 static void initialize_dead_wood_limiter();
duke@435 798
duke@435 799 // Return the percentage of space that can be treated as "dead wood" (i.e.,
duke@435 800 // not reclaimed).
duke@435 801 static double dead_wood_limiter(double density, size_t min_percent);
duke@435 802
jcoomes@810 803 // Find the first (left-most) region in the range [beg, end) that has at least
duke@435 804 // dead_words of dead space to the left. The argument beg must be the first
jcoomes@810 805 // region in the space that is not completely live.
jcoomes@810 806 static RegionData* dead_wood_limit_region(const RegionData* beg,
jcoomes@810 807 const RegionData* end,
jcoomes@810 808 size_t dead_words);
duke@435 809
jcoomes@810 810 // Return a pointer to the first region in the range [beg, end) that is not
duke@435 811 // completely full.
jcoomes@810 812 static RegionData* first_dead_space_region(const RegionData* beg,
jcoomes@810 813 const RegionData* end);
duke@435 814
duke@435 815 // Return a value indicating the benefit or 'yield' if the compacted region
duke@435 816 // were to start (or equivalently if the dense prefix were to end) at the
jcoomes@810 817 // candidate region. Higher values are better.
duke@435 818 //
duke@435 819 // The value is based on the amount of space reclaimed vs. the costs of (a)
duke@435 820 // updating references in the dense prefix plus (b) copying objects and
duke@435 821 // updating references in the compacted region.
jcoomes@810 822 static inline double reclaimed_ratio(const RegionData* const candidate,
duke@435 823 HeapWord* const bottom,
duke@435 824 HeapWord* const top,
duke@435 825 HeapWord* const new_top);
duke@435 826
duke@435 827 // Compute the dense prefix for the designated space.
duke@435 828 static HeapWord* compute_dense_prefix(const SpaceId id,
duke@435 829 bool maximum_compaction);
duke@435 830
jcoomes@810 831 // Return true if dead space crosses onto the specified Region; bit must be
jcoomes@810 832 // the bit index corresponding to the first word of the Region.
jcoomes@810 833 static inline bool dead_space_crosses_boundary(const RegionData* region,
duke@435 834 idx_t bit);
duke@435 835
duke@435 836 // Summary phase utility routine to fill dead space (if any) at the dense
duke@435 837 // prefix boundary. Should only be called if the dense prefix is
duke@435 838 // non-empty.
duke@435 839 static void fill_dense_prefix_end(SpaceId id);
duke@435 840
duke@435 841 static void summarize_spaces_quick();
duke@435 842 static void summarize_space(SpaceId id, bool maximum_compaction);
duke@435 843 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
duke@435 844
duke@435 845 // The space that is compacted after space_id.
duke@435 846 static SpaceId next_compaction_space_id(SpaceId space_id);
duke@435 847
duke@435 848 // Adjust addresses in roots. Does not adjust addresses in heap.
duke@435 849 static void adjust_roots();
duke@435 850
duke@435 851 // Serial code executed in preparation for the compaction phase.
duke@435 852 static void compact_prologue();
duke@435 853
duke@435 854 // Move objects to new locations.
duke@435 855 static void compact_perm(ParCompactionManager* cm);
duke@435 856 static void compact();
duke@435 857
jcoomes@810 858 // Add available regions to the stack and draining tasks to the task queue.
jcoomes@810 859 static void enqueue_region_draining_tasks(GCTaskQueue* q,
jcoomes@810 860 uint parallel_gc_threads);
duke@435 861
duke@435 862 // Add dense prefix update tasks to the task queue.
duke@435 863 static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
duke@435 864 uint parallel_gc_threads);
duke@435 865
jcoomes@810 866 // Add region stealing tasks to the task queue.
jcoomes@810 867 static void enqueue_region_stealing_tasks(
duke@435 868 GCTaskQueue* q,
duke@435 869 ParallelTaskTerminator* terminator_ptr,
duke@435 870 uint parallel_gc_threads);
duke@435 871
duke@435 872 // For debugging only - compacts the old gen serially
duke@435 873 static void compact_serial(ParCompactionManager* cm);
duke@435 874
duke@435 875 // If objects are left in eden after a collection, try to move the boundary
duke@435 876 // and absorb them into the old gen. Returns true if eden was emptied.
duke@435 877 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 878 PSYoungGen* young_gen,
duke@435 879 PSOldGen* old_gen);
duke@435 880
duke@435 881 // Reset time since last full gc
duke@435 882 static void reset_millis_since_last_gc();
duke@435 883
duke@435 884 protected:
duke@435 885 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 886 static GrowableArray<void*>* _root_refs_stack;
duke@435 887 static GrowableArray<oop> * _live_oops;
duke@435 888 static GrowableArray<oop> * _live_oops_moved_to;
duke@435 889 static GrowableArray<size_t>* _live_oops_size;
duke@435 890 static size_t _live_oops_index;
duke@435 891 static size_t _live_oops_index_at_perm;
coleenp@548 892 static GrowableArray<void*>* _other_refs_stack;
coleenp@548 893 static GrowableArray<void*>* _adjusted_pointers;
duke@435 894 static bool _pointer_tracking;
duke@435 895 static bool _root_tracking;
duke@435 896
duke@435 897 // The following arrays are saved since the time of the last GC and
duke@435 898 // assist in tracking down problems where someone has done an errant
duke@435 899 // store into the heap, usually to an oop that wasn't properly
duke@435 900 // handleized across a GC. If we crash or otherwise fail before the
duke@435 901 // next GC, we can query these arrays to find out the object we had
duke@435 902 // intended to do the store to (assuming it is still alive) and the
duke@435 903 // offset within that object. Covered under RecordMarkSweepCompaction.
duke@435 904 static GrowableArray<HeapWord*> * _cur_gc_live_oops;
duke@435 905 static GrowableArray<HeapWord*> * _cur_gc_live_oops_moved_to;
duke@435 906 static GrowableArray<size_t>* _cur_gc_live_oops_size;
duke@435 907 static GrowableArray<HeapWord*> * _last_gc_live_oops;
duke@435 908 static GrowableArray<HeapWord*> * _last_gc_live_oops_moved_to;
duke@435 909 static GrowableArray<size_t>* _last_gc_live_oops_size;
duke@435 910 #endif
duke@435 911
duke@435 912 public:
duke@435 913 class MarkAndPushClosure: public OopClosure {
coleenp@548 914 private:
duke@435 915 ParCompactionManager* _compaction_manager;
duke@435 916 public:
coleenp@548 917 MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
coleenp@548 918 virtual void do_oop(oop* p);
coleenp@548 919 virtual void do_oop(narrowOop* p);
duke@435 920 virtual const bool do_nmethods() const { return true; }
duke@435 921 };
duke@435 922
duke@435 923 PSParallelCompact();
duke@435 924
duke@435 925 // Convenient accessor for Universe::heap().
duke@435 926 static ParallelScavengeHeap* gc_heap() {
duke@435 927 return (ParallelScavengeHeap*)Universe::heap();
duke@435 928 }
duke@435 929
duke@435 930 static void invoke(bool maximum_heap_compaction);
duke@435 931 static void invoke_no_policy(bool maximum_heap_compaction);
duke@435 932
duke@435 933 static void post_initialize();
duke@435 934 // Perform initialization for PSParallelCompact that requires
duke@435 935 // allocations. This should be called during the VM initialization
duke@435 936 // at a point where it would be appropriate to return a JNI_ENOMEM
duke@435 937 // in the event of a failure.
duke@435 938 static bool initialize();
duke@435 939
duke@435 940 // Public accessors
duke@435 941 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
duke@435 942 static unsigned int total_invocations() { return _total_invocations; }
duke@435 943 static CollectorCounters* counters() { return _counters; }
duke@435 944
duke@435 945 // Used to add tasks
duke@435 946 static GCTaskManager* const gc_task_manager();
duke@435 947 static klassOop updated_int_array_klass_obj() {
duke@435 948 return _updated_int_array_klass_obj;
duke@435 949 }
duke@435 950
duke@435 951 // Marking support
duke@435 952 static inline bool mark_obj(oop obj);
coleenp@548 953 // Check mark and maybe push on marking stack
coleenp@548 954 template <class T> static inline void mark_and_push(ParCompactionManager* cm,
coleenp@548 955 T* p);
duke@435 956
duke@435 957 // Compaction support.
duke@435 958 // Return true if p is in the range [beg_addr, end_addr).
duke@435 959 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
duke@435 960 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
duke@435 961
duke@435 962 // Convenience wrappers for per-space data kept in _space_info.
duke@435 963 static inline MutableSpace* space(SpaceId space_id);
duke@435 964 static inline HeapWord* new_top(SpaceId space_id);
duke@435 965 static inline HeapWord* dense_prefix(SpaceId space_id);
duke@435 966 static inline ObjectStartArray* start_array(SpaceId space_id);
duke@435 967
duke@435 968 // Return true if the klass should be updated.
duke@435 969 static inline bool should_update_klass(klassOop k);
duke@435 970
duke@435 971 // Move and update the live objects in the specified space.
duke@435 972 static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
duke@435 973
jcoomes@810 974 // Process the end of the given region range in the dense prefix.
duke@435 975 // This includes saving any object not updated.
jcoomes@810 976 static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
jcoomes@810 977 size_t region_start_index,
jcoomes@810 978 size_t region_end_index,
jcoomes@810 979 idx_t exiting_object_offset,
jcoomes@810 980 idx_t region_offset_start,
jcoomes@810 981 idx_t region_offset_end);
duke@435 982
jcoomes@810 983 // Update a region in the dense prefix. For each live object
jcoomes@810 984 // in the region, update its interior references. For each
duke@435 985 // dead object, fill it with deadwood. Dead space at the end
jcoomes@810 986 // of a region range will be filled to the start of the next
jcoomes@810 987 // live object regardless of the region_index_end. None of the
duke@435 988 // objects in the dense prefix move and dead space is dead
duke@435 989 // (holds only dead objects that don't need any processing), so
duke@435 990 // dead space can be filled in any order.
duke@435 991 static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
duke@435 992 SpaceId space_id,
jcoomes@810 993 size_t region_index_start,
jcoomes@810 994 size_t region_index_end);
duke@435 995
duke@435 996 // Return the address of the (count + 1)st live word in the range [beg, end).
duke@435 997 static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
duke@435 998
duke@435 999 // Return the address of the word to be copied to dest_addr, which must be
jcoomes@810 1000 // aligned to a region boundary.
duke@435 1001 static HeapWord* first_src_addr(HeapWord* const dest_addr,
jcoomes@810 1002 size_t src_region_idx);
duke@435 1003
jcoomes@810 1004 // Determine the next source region, set closure.source() to the start of the
jcoomes@810 1005 // new region, and return the region index. Parameter end_addr is the address one
duke@435 1006 // beyond the end of source range just processed. If necessary, switch to a
duke@435 1007 // new source space and set src_space_id (in-out parameter) and src_space_top
duke@435 1008 // (out parameter) accordingly.
jcoomes@810 1009 static size_t next_src_region(MoveAndUpdateClosure& closure,
jcoomes@810 1010 SpaceId& src_space_id,
jcoomes@810 1011 HeapWord*& src_space_top,
jcoomes@810 1012 HeapWord* end_addr);
duke@435 1013
jcoomes@810 1014 // Decrement the destination count for each non-empty source region in the
jcoomes@810 1015 // range [beg_region, region(region_align_up(end_addr))).
duke@435 1016 static void decrement_destination_counts(ParCompactionManager* cm,
jcoomes@810 1017 size_t beg_region,
duke@435 1018 HeapWord* end_addr);
duke@435 1019
jcoomes@810 1020 // Fill a region, copying objects from one or more source regions.
jcoomes@810 1021 static void fill_region(ParCompactionManager* cm, size_t region_idx);
jcoomes@810 1022 static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
jcoomes@810 1023 fill_region(cm, region);
duke@435 1024 }
duke@435 1025
duke@435 1026 // Update the deferred objects in the space.
duke@435 1027 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
duke@435 1028
duke@435 1029 // Mark pointer and follow contents.
coleenp@548 1030 template <class T>
coleenp@548 1031 static inline void mark_and_follow(ParCompactionManager* cm, T* p);
duke@435 1032
duke@435 1033 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
duke@435 1034 static ParallelCompactData& summary_data() { return _summary_data; }
duke@435 1035
coleenp@548 1036 static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
coleenp@548 1037 static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
coleenp@548 1038
coleenp@548 1039 template <class T>
coleenp@548 1040 static inline void adjust_pointer(T* p,
duke@435 1041 HeapWord* beg_addr,
duke@435 1042 HeapWord* end_addr);
duke@435 1043
duke@435 1044 // Reference Processing
duke@435 1045 static ReferenceProcessor* const ref_processor() { return _ref_processor; }
duke@435 1046
duke@435 1047 // Return the SpaceId for the given address.
duke@435 1048 static SpaceId space_id(HeapWord* addr);
duke@435 1049
duke@435 1050 // Time since last full gc (in milliseconds).
duke@435 1051 static jlong millis_since_last_gc();
duke@435 1052
duke@435 1053 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1054 static void track_adjusted_pointer(void* p, bool isroot);
coleenp@548 1055 static void check_adjust_pointer(void* p);
duke@435 1056 static void track_interior_pointers(oop obj);
duke@435 1057 static void check_interior_pointers();
duke@435 1058
duke@435 1059 static void reset_live_oop_tracking(bool at_perm);
duke@435 1060 static void register_live_oop(oop p, size_t size);
duke@435 1061 static void validate_live_oop(oop p, size_t size);
duke@435 1062 static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
duke@435 1063 static void compaction_complete();
duke@435 1064
duke@435 1065 // Querying operation of RecordMarkSweepCompaction results.
duke@435 1066 // Finds and prints the current base oop and offset for a word
duke@435 1067 // within an oop that was live during the last GC. Helpful for
duke@435 1068 // tracking down heap stomps.
duke@435 1069 static void print_new_location_of_heap_address(HeapWord* q);
duke@435 1070 #endif // #ifdef VALIDATE_MARK_SWEEP
duke@435 1071
duke@435 1072 // Call backs for class unloading
duke@435 1073 // Update subklass/sibling/implementor links at end of marking.
duke@435 1074 static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
duke@435 1075
duke@435 1076 #ifndef PRODUCT
duke@435 1077 // Debugging support.
duke@435 1078 static const char* space_names[last_space_id];
jcoomes@810 1079 static void print_region_ranges();
duke@435 1080 static void print_dense_prefix_stats(const char* const algorithm,
duke@435 1081 const SpaceId id,
duke@435 1082 const bool maximum_compaction,
duke@435 1083 HeapWord* const addr);
duke@435 1084 #endif // #ifndef PRODUCT
duke@435 1085
duke@435 1086 #ifdef ASSERT
jcoomes@810 1087 // Verify that all the regions have been emptied.
duke@435 1088 static void verify_complete(SpaceId space_id);
duke@435 1089 #endif // #ifdef ASSERT
duke@435 1090 };
duke@435 1091
coleenp@548 1092 inline bool PSParallelCompact::mark_obj(oop obj) {
duke@435 1093 const int obj_size = obj->size();
duke@435 1094 if (mark_bitmap()->mark_obj(obj, obj_size)) {
duke@435 1095 _summary_data.add_obj(obj, obj_size);
duke@435 1096 return true;
duke@435 1097 } else {
duke@435 1098 return false;
duke@435 1099 }
duke@435 1100 }
duke@435 1101
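// Editorial note on mark_obj() above: mark_bitmap()->mark_obj() is the atomic
// test-and-set that decides which thread owns the object, and add_obj()
// credits obj_size words to the region(s) covering the object, which is how
// live_obj_size() and partial_obj_size() in RegionData get their values
// during the marking phase.
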
coleenp@548 1102 template <class T>
coleenp@548 1103 inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
coleenp@548 1104 assert(!Universe::heap()->is_in_reserved(p),
coleenp@548 1105 "roots shouldn't be things within the heap");
coleenp@548 1106 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1107 if (ValidateMarkSweep) {
coleenp@548 1108 guarantee(!_root_refs_stack->contains(p), "should only be in here once");
coleenp@548 1109 _root_refs_stack->push(p);
coleenp@548 1110 }
coleenp@548 1111 #endif
coleenp@548 1112 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1113 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1114 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1115 if (mark_bitmap()->is_unmarked(obj)) {
coleenp@548 1116 if (mark_obj(obj)) {
coleenp@548 1117 obj->follow_contents(cm);
coleenp@548 1118 }
coleenp@548 1119 }
coleenp@548 1120 }
coleenp@548 1121 follow_stack(cm);
coleenp@548 1122 }
coleenp@548 1123
coleenp@548 1124 template <class T>
coleenp@548 1125 inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
coleenp@548 1126 T* p) {
coleenp@548 1127 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1128 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1129 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1130 if (mark_bitmap()->is_unmarked(obj)) {
coleenp@548 1131 if (mark_obj(obj)) {
coleenp@548 1132 obj->follow_contents(cm);
coleenp@548 1133 }
coleenp@548 1134 }
coleenp@548 1135 }
coleenp@548 1136 }
coleenp@548 1137
coleenp@548 1138 template <class T>
coleenp@548 1139 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
coleenp@548 1140 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1141 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1142 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1143 if (mark_bitmap()->is_unmarked(obj)) {
coleenp@548 1144 if (mark_obj(obj)) {
coleenp@548 1145 // This thread marked the object and owns the subsequent processing of it.
coleenp@548 1146 cm->save_for_scanning(obj);
coleenp@548 1147 }
coleenp@548 1148 }
coleenp@548 1149 }
coleenp@548 1150 }
coleenp@548 1151
coleenp@548 1152 template <class T>
coleenp@548 1153 inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
coleenp@548 1154 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 1155 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 1156 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 1157 oop new_obj = (oop)summary_data().calc_new_pointer(obj);
coleenp@548 1158 assert(new_obj != NULL || // is forwarding ptr?
coleenp@548 1159 obj->is_shared(), // never forwarded?
coleenp@548 1160 "should be forwarded");
coleenp@548 1161 // Just always do the update unconditionally?
coleenp@548 1162 if (new_obj != NULL) {
coleenp@548 1163 assert(Universe::heap()->is_in_reserved(new_obj),
coleenp@548 1164 "should be in object space");
coleenp@548 1165 oopDesc::encode_store_heap_oop_not_null(p, new_obj);
coleenp@548 1166 }
coleenp@548 1167 }
coleenp@548 1168 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
coleenp@548 1169 }
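// Editorial sketch of the forwarding lookup assumed above: this compactor
// keeps no per-object forwarding pointer; calc_new_pointer() derives the
// destination from the per-region summary data, roughly along these lines
// (helper names hypothetical):
//
//   HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
//     if (is_in_dense_prefix(addr)) return addr;  // dense prefix does not move
//     RegionData* const r = region_containing(addr);
//     return r->destination() + live_words_before(r, addr);
//   }
//
// A NULL result means the object is not forwarded (e.g. a shared object,
// per the assert above), which is why the store is guarded.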
coleenp@548 1170
coleenp@548 1171 template <class T>
coleenp@548 1172 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
coleenp@548 1173 #ifdef VALIDATE_MARK_SWEEP
coleenp@548 1174 if (ValidateMarkSweep) {
coleenp@548 1175 if (!Universe::heap()->is_in_reserved(p)) {
coleenp@548 1176 _root_refs_stack->push(p);
coleenp@548 1177 } else {
coleenp@548 1178 _other_refs_stack->push(p);
coleenp@548 1179 }
coleenp@548 1180 }
coleenp@548 1181 #endif
coleenp@548 1182 mark_and_push(_compaction_manager, p);
coleenp@548 1183 }
coleenp@548 1184
coleenp@548 1185 inline bool PSParallelCompact::print_phases() {
duke@435 1186 return _print_phases;
duke@435 1187 }
duke@435 1188
coleenp@548 1189 inline double PSParallelCompact::normal_distribution(double density) {
duke@435 1190 assert(_dwl_initialized, "uninitialized");
duke@435 1191   const double term = (density - _dwl_mean) / _dwl_std_dev;  // standardized deviate
duke@435 1192   return _dwl_first_term * exp(-0.5 * term * term);
duke@435 1193 }
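// Worked example (editorial): normal_distribution() evaluates a Gaussian
// probability density,
//
//   f(d) = 1/(sigma*sqrt(2*pi)) * exp(-0.5*((d - mu)/sigma)^2)
//
// with mu = _dwl_mean and sigma = _dwl_std_dev; the constant leading factor
// 1/(sigma*sqrt(2*pi)) is assumed to be precomputed into _dwl_first_term by
// the initialization code guarded by _dwl_initialized. A self-contained
// equivalent with the precomputation inlined:
//
//   #include <math.h>
//   static double gaussian_density(double d, double mean, double std_dev) {
//     const double first_term = 1.0 / (std_dev * sqrt(2.0 * M_PI));
//     const double z = (d - mean) / std_dev;
//     return first_term * exp(-0.5 * z * z);
//   }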
duke@435 1194
duke@435 1195 inline bool
jcoomes@810 1196 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
duke@435 1197 idx_t bit)
duke@435 1198 {
jcoomes@810 1199 assert(bit > 0, "cannot call this for the first bit/region");
jcoomes@810 1200 assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
duke@435 1201 "sanity check");
duke@435 1202
duke@435 1203 // Dead space crosses the boundary if (1) a partial object does not extend
jcoomes@810 1204 // onto the region, (2) an object does not start at the beginning of the
jcoomes@810 1205 // region, and (3) an object does not end at the end of the prior region.
jcoomes@810 1206 return region->partial_obj_size() == 0 &&
duke@435 1207 !_mark_bitmap.is_obj_beg(bit) &&
duke@435 1208 !_mark_bitmap.is_obj_end(bit - 1);
duke@435 1209 }
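// Worked example (editorial): suppose each region were four words and the
// mark bitmap around a region boundary looked like this (D = dead, L = live):
//
//   prior region: [ L L D D ] | [ D D L L ] : region passed in
//                               ^ bit
//
// Then partial_obj_size() == 0 (no object spills into the region),
// is_obj_beg(bit) is false (no object starts at the region's first word),
// and is_obj_end(bit - 1) is false (no object ends the prior region), so
// the function returns true: the dead space straddles the boundary.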
duke@435 1210
duke@435 1211 inline bool
duke@435 1212 PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
duke@435 1213 return p >= beg_addr && p < end_addr;
duke@435 1214 }
duke@435 1215
duke@435 1216 inline bool
duke@435 1217 PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
duke@435 1218 return is_in((HeapWord*)p, beg_addr, end_addr);
duke@435 1219 }
duke@435 1220
duke@435 1221 inline MutableSpace* PSParallelCompact::space(SpaceId id) {
duke@435 1222 assert(id < last_space_id, "id out of range");
duke@435 1223 return _space_info[id].space();
duke@435 1224 }
duke@435 1225
duke@435 1226 inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
duke@435 1227 assert(id < last_space_id, "id out of range");
duke@435 1228 return _space_info[id].new_top();
duke@435 1229 }
duke@435 1230
duke@435 1231 inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
duke@435 1232 assert(id < last_space_id, "id out of range");
duke@435 1233 return _space_info[id].dense_prefix();
duke@435 1234 }
duke@435 1235
duke@435 1236 inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
duke@435 1237 assert(id < last_space_id, "id out of range");
duke@435 1238 return _space_info[id].start_array();
duke@435 1239 }
duke@435 1240
duke@435 1241 inline bool PSParallelCompact::should_update_klass(klassOop k) {
duke@435 1242 return ((HeapWord*) k) >= dense_prefix(perm_space_id);
duke@435 1243 }
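// Editorial note: klassOops live in the perm gen, and anything below the
// perm-gen dense prefix is not moved by the compaction. A klass pointer into
// that range therefore never needs adjusting; only a klass at or above the
// dense prefix boundary may have been forwarded, hence the comparison above.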
duke@435 1244
coleenp@548 1245 template <class T>
coleenp@548 1246 inline void PSParallelCompact::adjust_pointer(T* p,
duke@435 1247 HeapWord* beg_addr,
duke@435 1248 HeapWord* end_addr) {
coleenp@548 1249 if (is_in((HeapWord*)p, beg_addr, end_addr)) {
duke@435 1250 adjust_pointer(p);
duke@435 1251 }
duke@435 1252 }
duke@435 1253
duke@435 1254 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
duke@435 1255 public:
duke@435 1256 inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
duke@435 1257 ObjectStartArray* start_array,
duke@435 1258 HeapWord* destination, size_t words);
duke@435 1259
duke@435 1260 // Accessors.
duke@435 1261 HeapWord* destination() const { return _destination; }
duke@435 1262
duke@435 1263   // If the object will fit (size <= words_remaining()), copy it to the current
duke@435 1264   // destination, update the interior oops and the start array, and return
duke@435 1265   // either full (if the closure is full) or incomplete. If the object will not
duke@435 1266   // fit, return would_overflow.
duke@435 1267 virtual IterationStatus do_addr(HeapWord* addr, size_t size);
duke@435 1268
duke@435 1269 // Copy enough words to fill this closure, starting at source(). Interior
duke@435 1270 // oops and the start array are not updated. Return full.
duke@435 1271 IterationStatus copy_until_full();
duke@435 1272
duke@435 1273 // Copy enough words to fill this closure or to the end of an object,
duke@435 1274 // whichever is smaller, starting at source(). Interior oops and the start
duke@435 1275 // array are not updated.
duke@435 1276 void copy_partial_obj();
duke@435 1277
duke@435 1278 protected:
duke@435 1279   // Update state to reflect that 'words' words were processed.
duke@435 1280   inline void update_state(size_t words);
duke@435 1281
duke@435 1282 protected:
duke@435 1283 ObjectStartArray* const _start_array;
duke@435 1284 HeapWord* _destination; // Next addr to be written.
duke@435 1285 };
duke@435 1286
duke@435 1287 inline
duke@435 1288 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
duke@435 1289 ParCompactionManager* cm,
duke@435 1290 ObjectStartArray* start_array,
duke@435 1291 HeapWord* destination,
duke@435 1292 size_t words) :
duke@435 1293 ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
duke@435 1294 {
duke@435 1295 _destination = destination;
duke@435 1296 }
duke@435 1297
duke@435 1298 inline void MoveAndUpdateClosure::update_state(size_t words)
duke@435 1299 {
duke@435 1300 decrement_words_remaining(words);
duke@435 1301 _source += words;
duke@435 1302 _destination += words;
duke@435 1303 }
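// Editorial sketch of how do_addr() is expected to use the pieces above
// (simplified; the real implementation lives in psParallelCompact.cpp and
// also maintains the start array):
//
//   ParMarkBitMap::IterationStatus
//   MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
//     if (words > words_remaining()) {
//       return ParMarkBitMap::would_overflow;  // caller starts a new closure
//     }
//     if (destination() != addr) {             // move the object, if it moves
//       Copy::aligned_conjoint_words(addr, destination(), words);
//     }
//     oop(destination())->update_contents(compaction_manager());
//     update_state(words);                     // advance source & destination
//     return words_remaining() > 0 ? ParMarkBitMap::incomplete
//                                  : ParMarkBitMap::full;
//   }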
duke@435 1304
duke@435 1305 class UpdateOnlyClosure: public ParMarkBitMapClosure {
duke@435 1306 private:
duke@435 1307 const PSParallelCompact::SpaceId _space_id;
duke@435 1308 ObjectStartArray* const _start_array;
duke@435 1309
duke@435 1310 public:
duke@435 1311 UpdateOnlyClosure(ParMarkBitMap* mbm,
duke@435 1312 ParCompactionManager* cm,
duke@435 1313 PSParallelCompact::SpaceId space_id);
duke@435 1314
duke@435 1315 // Update the object.
duke@435 1316 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
duke@435 1317
duke@435 1318 inline void do_addr(HeapWord* addr);
duke@435 1319 };
duke@435 1320
coleenp@548 1321 inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
coleenp@548 1322 {
duke@435 1323 _start_array->allocate_block(addr);
duke@435 1324 oop(addr)->update_contents(compaction_manager());
duke@435 1325 }
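// Editorial note: allocate_block() records the (unmoved) object's start in
// the ObjectStartArray before update_contents() rewrites its interior oops,
// so that later card-based scans of the old and perm generations can locate
// the first object in each block.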
duke@435 1326
duke@435 1327 class FillClosure: public ParMarkBitMapClosure {
coleenp@548 1328 public:
coleenp@548 1329 FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
duke@435 1330 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
duke@435 1331 _space_id(space_id),
coleenp@548 1332 _start_array(PSParallelCompact::start_array(space_id)) {
duke@435 1333 assert(_space_id == PSParallelCompact::perm_space_id ||
duke@435 1334 _space_id == PSParallelCompact::old_space_id,
duke@435 1335 "cannot use FillClosure in the young gen");
duke@435 1336 assert(bitmap() != NULL, "need a bitmap");
duke@435 1337 assert(_start_array != NULL, "need a start array");
duke@435 1338 }
duke@435 1339
duke@435 1340 void fill_region(HeapWord* addr, size_t size) {
duke@435 1341 MemRegion region(addr, size);
duke@435 1342 SharedHeap::fill_region_with_object(region);
duke@435 1343 _start_array->allocate_block(addr);
duke@435 1344 }
duke@435 1345
duke@435 1346 virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
duke@435 1347 fill_region(addr, size);
duke@435 1348 return ParMarkBitMap::incomplete;
duke@435 1349 }
duke@435 1350
duke@435 1351 private:
duke@435 1352 const PSParallelCompact::SpaceId _space_id;
duke@435 1353 ObjectStartArray* const _start_array;
duke@435 1354 };
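// Editorial note: FillClosure handles dead ranges that are left in place
// (e.g. under the dense prefix): each gap is overwritten with a dummy object
// so the space stays parsable, and the start array records where that filler
// begins. A hypothetical driver would invoke it on each dead range found in
// the mark bitmap, along these lines:
//
//   FillClosure fill(cm, PSParallelCompact::old_space_id);
//   // for each maximal dead range [beg, end) below the dense prefix end:
//   fill.do_addr(beg, pointer_delta(end, beg));  // fills and records the gap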
