src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

author:      jcoomes
date:        Fri, 28 Mar 2008 23:35:42 -0700
changeset:   514:82db0859acbe
parent:      435:a61af66fc99e
child:       548:ba764ed4b6f2
permissions: -rw-r--r--

6642862: Code cache allocation fails with large pages after 6588638
Reviewed-by: apetrusenko

/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class ParallelScavengeHeap;
class PSAdaptiveSizePolicy;
class PSYoungGen;
class PSOldGen;
class PSPermGen;
class ParCompactionManager;
class ParallelTaskTerminator;
class PSParallelCompact;
class GCTaskManager;
class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;

class SpaceInfo
{
 public:
  MutableSpace* space() const { return _space; }

  // Where the free space will start after the collection.  Valid only after
  // the summary phase completes.
  HeapWord* new_top() const { return _new_top; }

  // Allows new_top to be set.
  HeapWord** new_top_addr() { return &_new_top; }

  // Where the smallest allowable dense prefix ends (used only for perm gen).
  HeapWord* min_dense_prefix() const { return _min_dense_prefix; }

  // Where the dense prefix ends, or the compacted region begins.
  HeapWord* dense_prefix() const { return _dense_prefix; }

  // The start array for the (generation containing the) space, or NULL if
  // there is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

 private:
  MutableSpace*     _space;
  HeapWord*         _new_top;
  HeapWord*         _min_dense_prefix;
  HeapWord*         _dense_prefix;
  ObjectStartArray* _start_array;
};

class ParallelCompactData
{
 public:
  // Sizes are in HeapWords, unless indicated otherwise.
  static const size_t Log2ChunkSize;
  static const size_t ChunkSize;
  static const size_t ChunkSizeBytes;

  // Mask for the bits in a size_t to get an offset within a chunk.
  static const size_t ChunkSizeOffsetMask;
  // Mask for the bits in a pointer to get an offset within a chunk.
  static const size_t ChunkAddrOffsetMask;
  // Mask for the bits in a pointer to get the address of the start of a chunk.
  static const size_t ChunkAddrMask;

  static const size_t Log2BlockSize;
  static const size_t BlockSize;
  static const size_t BlockOffsetMask;
  static const size_t BlockMask;

  static const size_t BlocksPerChunk;
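
  // Illustrative note (the concrete values are assumptions; the actual
  // constants are defined in psParallelCompact.cpp): if Log2ChunkSize were
  // 11, then ChunkSize == 2048 words, ChunkSizeBytes == ChunkSize <<
  // LogHeapWordSize, ChunkSizeOffsetMask == ChunkSize - 1, ChunkAddrOffsetMask
  // == ChunkSizeBytes - 1, and ChunkAddrMask == ~ChunkAddrOffsetMask.
  // BlocksPerChunk == ChunkSize / BlockSize relates the two granularities.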

  class ChunkData
  {
   public:
    // Destination address of the chunk.
    HeapWord* destination() const { return _destination; }

    // The first chunk containing data destined for this chunk.
    size_t source_chunk() const { return _source_chunk; }

    // The object (if any) starting in this chunk and ending in a different
    // chunk that could not be updated during the main (parallel) compaction
    // phase.  This is different from _partial_obj_addr, which is an object
    // that extends onto a source chunk.  However, the two uses do not overlap
    // in time, so the same field is used to save space.
    HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }

    // The starting address of the partial object extending onto the chunk.
    HeapWord* partial_obj_addr() const { return _partial_obj_addr; }

    // Size of the partial object extending onto the chunk (words).
    size_t partial_obj_size() const { return _partial_obj_size; }

    // Size of live data that lies within this chunk due to objects that start
    // in this chunk (words).  This does not include the partial object
    // extending onto the chunk (if any), or the part of an object that
    // extends onto the next chunk (if any).
    size_t live_obj_size() const { return _dc_and_los & los_mask; }

    // Total live data that lies within the chunk (words).
    size_t data_size() const { return partial_obj_size() + live_obj_size(); }

    // The destination_count is the number of other chunks to which data from
    // this chunk will be copied.  At the end of the summary phase, the valid
    // values of destination_count are
    //
    // 0 - data from the chunk will be compacted completely into itself, or
    //     the chunk is empty.  The chunk can be claimed and then filled.
    // 1 - data from the chunk will be compacted into 1 other chunk; some
    //     data from the chunk may also be compacted into the chunk itself.
    // 2 - data from the chunk will be copied to 2 other chunks.
    //
    // During compaction, as chunks are emptied, the destination_count is
    // decremented (atomically) and when it reaches 0, the chunk can be
    // claimed and then filled.
    //
    // A chunk is claimed for processing by atomically changing the
    // destination_count to the claimed value (dc_claimed).  After a chunk has
    // been filled, the destination_count should be set to the completed value
    // (dc_completed).
    inline uint destination_count() const;
    inline uint destination_count_raw() const;

    // The location of the java heap data that corresponds to this chunk.
    inline HeapWord* data_location() const;

    // The highest address referenced by objects in this chunk.
    inline HeapWord* highest_ref() const;

    // Whether this chunk is available to be claimed, has been claimed, or has
    // been completed.
    //
    // Minor subtlety:  claimed() returns true if the chunk is marked
    // completed(), which is desirable since a chunk must be claimed before it
    // can be completed.
    bool available() const { return _dc_and_los < dc_one; }
    bool claimed()   const { return _dc_and_los >= dc_claimed; }
    bool completed() const { return _dc_and_los >= dc_completed; }

    // These are not atomic.
    void set_destination(HeapWord* addr)       { _destination = addr; }
    void set_source_chunk(size_t chunk)        { _source_chunk = chunk; }
    void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
    void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
    void set_partial_obj_size(size_t words)    {
      _partial_obj_size = (chunk_sz_t) words;
    }

    inline void set_destination_count(uint count);
    inline void set_live_obj_size(size_t words);
    inline void set_data_location(HeapWord* addr);
    inline void set_completed();
    inline bool claim_unsafe();

    // These are atomic.
    inline void add_live_obj(size_t words);
    inline void set_highest_ref(HeapWord* addr);
    inline void decrement_destination_count();
    inline bool claim();

   private:
    // The type used to represent object sizes within a chunk.
    typedef uint chunk_sz_t;

    // Constants for manipulating the _dc_and_los field, which holds both the
    // destination count and live obj size.  The live obj size lives at the
    // least significant end so no masking is necessary when adding.
    static const chunk_sz_t dc_shift;      // Shift amount.
    static const chunk_sz_t dc_mask;       // Mask for destination count.
    static const chunk_sz_t dc_one;        // 1, shifted appropriately.
    static const chunk_sz_t dc_claimed;    // Chunk has been claimed.
    static const chunk_sz_t dc_completed;  // Chunk has been completed.
    static const chunk_sz_t los_mask;      // Mask for live obj size.

    HeapWord*           _destination;
    size_t              _source_chunk;
    HeapWord*           _partial_obj_addr;
    chunk_sz_t          _partial_obj_size;
    chunk_sz_t volatile _dc_and_los;
#ifdef ASSERT
    // These enable optimizations that are only partially implemented.  Use
    // debug builds to prevent the code fragments from breaking.
    HeapWord*           _data_location;
    HeapWord*           _highest_ref;
#endif  // #ifdef ASSERT

#ifdef ASSERT
   public:
    uint                _pushed;  // 0 until chunk is pushed onto a worker's stack
   private:
#endif
  };

  // 'Blocks' allow shorter sections of the bitmap to be searched.  Each Block
  // holds an offset, which is the amount of live data in the Chunk to the
  // left of the first live object in the Block.  This amount of live data
  // will include any object extending into the block.  The first block in a
  // chunk does not include any partial object extending into the chunk.
  //
  // The offset also encodes the 'parity' of the first 1 bit in the Block: a
  // positive offset means the first 1 bit marks the start of an object, a
  // negative offset means the first 1 bit marks the end of an object.
  class BlockData
  {
   public:
    typedef short int blk_ofs_t;

    blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
    blk_ofs_t raw_offset() const { return _offset; }
    void set_first_is_start_bit(bool v) { _first_is_start_bit = v; }

#if 0
    // The need for this method was anticipated but it is never actually used.
    // Do not include it for now.  If it is needed, consider the problem of
    // what is passed as "v".  To avoid warning errors, the method
    // set_start_bit_offset() was changed to take a size_t as the parameter
    // and to do the check for possible overflow.  Doing the cast in these
    // methods limits the potential problems, due to the size of the field,
    // to this class.
    void set_raw_offset(blk_ofs_t v) { _offset = v; }
#endif
    void set_start_bit_offset(size_t val) {
      assert(val >= 0, "sanity");
      _offset = (blk_ofs_t) val;
      assert(val == (size_t) _offset, "Value is too large");
      _first_is_start_bit = true;
    }
    void set_end_bit_offset(size_t val) {
      assert(val >= 0, "sanity");
      _offset = (blk_ofs_t) val;
      assert(val == (size_t) _offset, "Value is too large");
      _offset = - _offset;
      _first_is_start_bit = false;
    }
    bool first_is_start_bit() {
      assert(_set_phase > 0, "Not initialized");
      return _first_is_start_bit;
    }
    bool first_is_end_bit() {
      assert(_set_phase > 0, "Not initialized");
      return !_first_is_start_bit;
    }

   private:
    blk_ofs_t _offset;
    // This is temporary until the mark_bitmap is separated into
    // a start bit array and an end bit array.
    bool      _first_is_start_bit;
#ifdef ASSERT
    short     _set_phase;
    static short _cur_phase;
   public:
    static void set_cur_phase(short v) { _cur_phase = v; }
#endif
  };
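
  // Illustrative sketch of the offset/parity encoding described above (the
  // values are hypothetical):
  //
  //   BlockData bd;
  //   bd.set_start_bit_offset(12);  // raw_offset() == +12; first 1 bit starts an object
  //   bd.set_end_bit_offset(12);    // raw_offset() == -12; first 1 bit ends an object
  //   // In both cases offset() returns the magnitude, 12.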

 public:
  ParallelCompactData();
  bool initialize(MemRegion covered_region);

  size_t chunk_count() const { return _chunk_count; }

  // Convert chunk indices to/from ChunkData pointers.
  inline ChunkData* chunk(size_t chunk_idx) const;
  inline size_t     chunk(const ChunkData* const chunk_ptr) const;

  // Returns true if the given address is contained within the chunk.
  bool chunk_contains(size_t chunk_index, HeapWord* addr);

  size_t block_count() const { return _block_count; }
  inline BlockData* block(size_t n) const;

  // Returns true if the given block is in the given chunk.
  static bool chunk_contains_block(size_t chunk_index, size_t block_index);

  void add_obj(HeapWord* addr, size_t len);
  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }

  // Fill in the chunks covering [beg, end) so that no data moves; i.e., the
  // destination of chunk n is simply the start of chunk n.  The argument beg
  // must be chunk-aligned; end need not be.
  void summarize_dense_prefix(HeapWord* beg, HeapWord* end);

  bool summarize(HeapWord* target_beg, HeapWord* target_end,
                 HeapWord* source_beg, HeapWord* source_end,
                 HeapWord** target_next, HeapWord** source_next = 0);
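
  // A sketch of how the summary phase might drive the two routines above
  // (illustrative only; the variable names are hypothetical and the real
  // call sites live in psParallelCompact.cpp):
  //
  //   HeapWord* new_top;
  //   summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
  //   summary_data.summarize(dense_prefix_end, space->end(),  // target range
  //                          dense_prefix_end, space->top(),  // source range
  //                          &new_top);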

  void clear();
  void clear_range(size_t beg_chunk, size_t end_chunk);
  void clear_range(HeapWord* beg, HeapWord* end) {
    clear_range(addr_to_chunk_idx(beg), addr_to_chunk_idx(end));
  }

  // Return the number of words between addr and the start of the chunk
  // containing addr.
  inline size_t chunk_offset(const HeapWord* addr) const;

  // Convert addresses to/from a chunk index or chunk pointer.
  inline size_t addr_to_chunk_idx(const HeapWord* addr) const;
  inline ChunkData* addr_to_chunk_ptr(const HeapWord* addr) const;
  inline HeapWord* chunk_to_addr(size_t chunk) const;
  inline HeapWord* chunk_to_addr(size_t chunk, size_t offset) const;
  inline HeapWord* chunk_to_addr(const ChunkData* chunk) const;

  inline HeapWord* chunk_align_down(HeapWord* addr) const;
  inline HeapWord* chunk_align_up(HeapWord* addr) const;
  inline bool is_chunk_aligned(HeapWord* addr) const;

  // Analogous to chunk_offset() for blocks.
  size_t block_offset(const HeapWord* addr) const;
  size_t addr_to_block_idx(const HeapWord* addr) const;
  size_t addr_to_block_idx(const oop obj) const {
    return addr_to_block_idx((HeapWord*) obj);
  }
  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
  inline HeapWord* block_to_addr(size_t block) const;

  // Return the address one past the end of the partial object.
  HeapWord* partial_obj_end(size_t chunk_idx) const;

  // Return the new location of the object p after the compaction.
  HeapWord* calc_new_pointer(HeapWord* addr);

  // Same as calc_new_pointer() using blocks.
  HeapWord* block_calc_new_pointer(HeapWord* addr);

  // Same as calc_new_pointer() using chunks.
  HeapWord* chunk_calc_new_pointer(HeapWord* addr);

  HeapWord* calc_new_pointer(oop p) {
    return calc_new_pointer((HeapWord*) p);
  }

  // Return the updated address for the given klass.
  klassOop calc_new_klass(klassOop);

  // Given a block, returns true if the partial object for the corresponding
  // chunk ends in the block; returns false otherwise.  If there is no partial
  // object, returns false.
  bool partial_obj_ends_in_block(size_t block_index);

  // Returns the index of the given block.
  static size_t block_idx(BlockData* block);

#ifdef ASSERT
  void verify_clear(const PSVirtualSpace* vspace);
  void verify_clear();
#endif  // #ifdef ASSERT

 private:
  bool initialize_block_data(size_t region_size);
  bool initialize_chunk_data(size_t region_size);
  PSVirtualSpace* create_vspace(size_t count, size_t element_size);

 private:
  HeapWord*       _region_start;
#ifdef ASSERT
  HeapWord*       _region_end;
#endif  // #ifdef ASSERT

  PSVirtualSpace* _chunk_vspace;
  ChunkData*      _chunk_data;
  size_t          _chunk_count;

  PSVirtualSpace* _block_vspace;
  BlockData*      _block_data;
  size_t          _block_count;
};

inline uint
ParallelCompactData::ChunkData::destination_count_raw() const
{
  return _dc_and_los & dc_mask;
}

inline uint
ParallelCompactData::ChunkData::destination_count() const
{
  return destination_count_raw() >> dc_shift;
}

inline void
ParallelCompactData::ChunkData::set_destination_count(uint count)
{
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}

inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (chunk_sz_t)words;
}

inline void ParallelCompactData::ChunkData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
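  // Note (added for clarity; this relies on an assumption about how the
  // constants are defined in psParallelCompact.cpp): when the destination
  // count occupies the topmost bits, dc_mask equals -dc_one in two's
  // complement, so the add below atomically subtracts one from the
  // destination count without disturbing the live obj size bits.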
  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
}

inline HeapWord* ParallelCompactData::ChunkData::data_location() const
{
  DEBUG_ONLY(return _data_location;)
  NOT_DEBUG(return NULL;)
}

inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const
{
  DEBUG_ONLY(return _highest_ref;)
  NOT_DEBUG(return NULL;)
}

inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr)
{
  DEBUG_ONLY(_data_location = addr;)
}

inline void ParallelCompactData::ChunkData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size();
}

// MT-unsafe claiming of a chunk.  Should only be used during single threaded
// execution.
inline bool ParallelCompactData::ChunkData::claim_unsafe()
{
  if (available()) {
    _dc_and_los |= dc_claimed;
    return true;
  }
  return false;
}

inline void ParallelCompactData::ChunkData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add((int) words, (volatile int*) &_dc_and_los);
}

inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}

inline bool ParallelCompactData::ChunkData::claim()
{
  const int los = (int) live_obj_size();
  const int old = Atomic::cmpxchg(dc_claimed | los,
                                  (volatile int*) &_dc_and_los, los);
  return old == los;
}
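
// Illustrative sketch of the claim/fill/complete protocol described in the
// ChunkData comments (the names `cm` and `chunk_idx` are hypothetical; the
// real driver is the compaction phase in psParallelCompact.cpp):
//
//   ChunkData* const cd = PSParallelCompact::summary_data().chunk(chunk_idx);
//   if (cd->claim()) {                                 // available -> claimed, atomically
//     PSParallelCompact::fill_chunk(cm, chunk_idx);    // copy data into the chunk
//     cd->set_completed();                             // claimed -> completed
//   }
//
// Note that claim() succeeds only when the destination count is zero and the
// chunk is unclaimed, because the cmpxchg compares against the bare live obj
// size.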

inline ParallelCompactData::ChunkData*
ParallelCompactData::chunk(size_t chunk_idx) const
{
  assert(chunk_idx <= chunk_count(), "bad arg");
  return _chunk_data + chunk_idx;
}

inline size_t
ParallelCompactData::chunk(const ChunkData* const chunk_ptr) const
{
  assert(chunk_ptr >= _chunk_data, "bad arg");
  assert(chunk_ptr <= _chunk_data + chunk_count(), "bad arg");
  return pointer_delta(chunk_ptr, _chunk_data, sizeof(ChunkData));
}

inline ParallelCompactData::BlockData*
ParallelCompactData::block(size_t n) const {
  assert(n < block_count(), "bad arg");
  return _block_data + n;
}

inline size_t
ParallelCompactData::chunk_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return (size_t(addr) & ChunkAddrOffsetMask) >> LogHeapWordSize;
}

inline size_t
ParallelCompactData::addr_to_chunk_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2ChunkSize;
}

inline ParallelCompactData::ChunkData*
ParallelCompactData::addr_to_chunk_ptr(const HeapWord* addr) const
{
  return chunk(addr_to_chunk_idx(addr));
}

inline HeapWord*
ParallelCompactData::chunk_to_addr(size_t chunk) const
{
  assert(chunk <= _chunk_count, "chunk out of range");
  return _region_start + (chunk << Log2ChunkSize);
}

inline HeapWord*
ParallelCompactData::chunk_to_addr(const ChunkData* chunk) const
{
  return chunk_to_addr(pointer_delta(chunk, _chunk_data, sizeof(ChunkData)));
}

inline HeapWord*
ParallelCompactData::chunk_to_addr(size_t chunk, size_t offset) const
{
  assert(chunk <= _chunk_count, "chunk out of range");
  assert(offset < ChunkSize, "offset too big");  // This may be too strict.
  return chunk_to_addr(chunk) + offset;
}

inline HeapWord*
ParallelCompactData::chunk_align_down(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr < _region_end + ChunkSize, "bad addr");
  return (HeapWord*)(size_t(addr) & ChunkAddrMask);
}

inline HeapWord*
ParallelCompactData::chunk_align_up(HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return chunk_align_down(addr + ChunkSizeOffsetMask);
}

inline bool
ParallelCompactData::is_chunk_aligned(HeapWord* addr) const
{
  return chunk_offset(addr) == 0;
}
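
// Worked example of the address arithmetic above (all values hypothetical;
// assume ChunkSize == 2048 words, LogHeapWordSize == 3, and a chunk-aligned
// _region_start):
//
//   HeapWord* addr = _region_start + 2048 * 3 + 100;  // 100 words into chunk 3
//   addr_to_chunk_idx(addr);  // == 3
//   chunk_offset(addr);       // == 100
//   chunk_align_down(addr);   // == _region_start + 2048 * 3
//   chunk_align_up(addr);     // == _region_start + 2048 * 4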

inline size_t
ParallelCompactData::block_offset(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) & BlockOffsetMask;
}

inline size_t
ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
{
  assert(addr >= _region_start, "bad addr");
  assert(addr <= _region_end, "bad addr");
  return pointer_delta(addr, _region_start) >> Log2BlockSize;
}

inline ParallelCompactData::BlockData*
ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
{
  return block(addr_to_block_idx(addr));
}

inline HeapWord*
ParallelCompactData::block_to_addr(size_t block) const
{
  assert(block < _block_count, "block out of range");
  return _region_start + (block << Log2BlockSize);
}

// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke
// the do_addr() method.
//
// The closure is initialized with the number of heap words to process
// (words_remaining()), and becomes 'full' when it reaches 0.  The do_addr()
// methods in subclasses should update the total as words are processed.  Since
// only one subclass actually uses this mechanism to terminate iteration, the
// default initial value is > 0.  The implementation is here and not in the
// single subclass that uses it to avoid making is_full() virtual, and thus
// adding a virtual call per live object.

class ParMarkBitMapClosure: public StackObj {
 public:
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParMarkBitMap::IterationStatus IterationStatus;

 public:
  inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
                              size_t words = max_uintx);

  inline ParCompactionManager* compaction_manager() const;
  inline ParMarkBitMap*        bitmap() const;
  inline size_t                words_remaining() const;
  inline bool                  is_full() const;
  inline HeapWord*             source() const;

  inline void set_source(HeapWord* addr);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;

 protected:
  inline void decrement_words_remaining(size_t words);

 private:
  ParMarkBitMap* const        _bitmap;
  ParCompactionManager* const _compaction_manager;
  DEBUG_ONLY(const size_t     _initial_words_remaining;)  // Useful in debugger.
  size_t                      _words_remaining;  // Words left to copy.

 protected:
  HeapWord*                   _source;  // Next addr that would be read.
};

inline
ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           size_t words):
  _bitmap(bitmap), _compaction_manager(cm)
#ifdef ASSERT
  , _initial_words_remaining(words)
#endif
{
  _words_remaining = words;
  _source = NULL;
}

inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
  return _compaction_manager;
}

inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
  return _bitmap;
}

inline size_t ParMarkBitMapClosure::words_remaining() const {
  return _words_remaining;
}

inline bool ParMarkBitMapClosure::is_full() const {
  return words_remaining() == 0;
}

inline HeapWord* ParMarkBitMapClosure::source() const {
  return _source;
}

inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
  _source = addr;
}

inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
  assert(_words_remaining >= words, "processed too many words");
  _words_remaining -= words;
}
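
// Minimal sketch of a subclass using the words_remaining() mechanism to
// terminate iteration (hypothetical; not part of this header):
//
//   class CountLiveWordsClosure: public ParMarkBitMapClosure {
//    public:
//     CountLiveWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm,
//                           size_t words)
//       : ParMarkBitMapClosure(bm, cm, words) { }
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       decrement_words_remaining(words);  // update the running total
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };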

// Closure for updating the block data during the summary phase.
class BitBlockUpdateClosure: public ParMarkBitMapClosure {
  // ParallelCompactData::BlockData::blk_ofs_t _live_data_left;
  size_t    _live_data_left;
  size_t    _cur_block;
  HeapWord* _chunk_start;
  HeapWord* _chunk_end;
  size_t    _chunk_index;

 public:
  BitBlockUpdateClosure(ParMarkBitMap* mbm,
                        ParCompactionManager* cm,
                        size_t chunk_index);

  size_t cur_block() { return _cur_block; }
  size_t chunk_index() { return _chunk_index; }
  size_t live_data_left() { return _live_data_left; }
  // Returns true if the current block (cur_block) is within the chunk for the
  // closure.
  bool chunk_contains_cur_block();

  // Set the chunk index and related chunk values for
  // a new chunk.
  void reset_chunk(size_t chunk_index);

  virtual IterationStatus do_addr(HeapWord* addr, size_t words);
};

class PSParallelCompact : AllStatic {
 public:
  // Convenient access to type names.
  typedef ParMarkBitMap::idx_t idx_t;
  typedef ParallelCompactData::ChunkData ChunkData;
  typedef ParallelCompactData::BlockData BlockData;

  typedef enum {
    perm_space_id, old_space_id, eden_space_id,
    from_space_id, to_space_id, last_space_id
  } SpaceId;

 public:
  // Inline closure declarations.
  //

  class IsAliveClosure: public BoolObjectClosure {
   public:
    void do_object(oop p) { assert(false, "don't call"); }
    bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
  };

  class KeepAliveClosure: public OopClosure {
    ParCompactionManager* _compaction_manager;
   public:
    KeepAliveClosure(ParCompactionManager* cm) {
      _compaction_manager = cm;
    }
    void do_oop(oop* p);
  };

  class FollowRootClosure: public OopsInGenClosure {
    ParCompactionManager* _compaction_manager;
   public:
    FollowRootClosure(ParCompactionManager* cm) {
      _compaction_manager = cm;
    }
    void do_oop(oop* p) { follow_root(_compaction_manager, p); }
    virtual const bool do_nmethods() const { return true; }
  };

  class FollowStackClosure: public VoidClosure {
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) {
      _compaction_manager = cm;
    }
    void do_void() { follow_stack(_compaction_manager); }
  };

  class AdjustPointerClosure: public OopsInGenClosure {
    bool _is_root;
   public:
    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
    void do_oop(oop* p) { adjust_pointer(p, _is_root); }
  };

  // Closure for verifying update of pointers.  Does not
  // have any side effects.
  class VerifyUpdateClosure: public ParMarkBitMapClosure {
    const MutableSpace* _space;  // Is this ever used?

   public:
    VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
      ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
    { }

    virtual IterationStatus do_addr(HeapWord* addr, size_t words);

    const MutableSpace* space() { return _space; }
  };

  // Closure for resetting objects that were altered for debug checking.
  class ResetObjectsClosure: public ParMarkBitMapClosure {
   public:
    ResetObjectsClosure(ParCompactionManager* cm):
      ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
    { }

    virtual IterationStatus do_addr(HeapWord* addr, size_t words);
  };

  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class FollowRootClosure;
  friend class instanceKlassKlass;
  friend class RefProcTaskProxy;

  static void mark_and_push_internal(ParCompactionManager* cm, oop* p);

 private:
  static elapsedTimer         _accumulated_time;
  static unsigned int         _total_invocations;
  static unsigned int         _maximum_compaction_gc_num;
  static jlong                _time_of_last_gc;  // ms
  static CollectorCounters*   _counters;
  static ParMarkBitMap        _mark_bitmap;
  static ParallelCompactData  _summary_data;
  static IsAliveClosure       _is_alive_closure;
  static SpaceInfo            _space_info[last_space_id];
  static bool                 _print_phases;
  static AdjustPointerClosure _adjust_root_pointer_closure;
  static AdjustPointerClosure _adjust_pointer_closure;

  // Reference processing (used in ...follow_contents)
  static ReferenceProcessor*  _ref_processor;

  // Updated location of intArrayKlassObj.
  static klassOop _updated_int_array_klass_obj;

  // Values computed at initialization and used by dead_wood_limiter().
  static double _dwl_mean;
  static double _dwl_std_dev;
  static double _dwl_first_term;
  static double _dwl_adjustment;
#ifdef ASSERT
  static bool   _dwl_initialized;
#endif  // #ifdef ASSERT

 private:
  // Closure accessors
  static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
  static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }

  static void initialize_space_info();

  // Return true if details about individual phases should be printed.
  static inline bool print_phases();

  // Clear the marking bitmap and summary data that cover the specified space.
  static void clear_data_covering_space(SpaceId id);

  static void pre_compact(PreGCValues* pre_gc_values);
  static void post_compact();

  // Mark live objects.
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction);
  static void follow_stack(ParCompactionManager* cm);
  static void follow_weak_klass_links(ParCompactionManager* cm);

  static void adjust_pointer(oop* p, bool is_root);
  static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }

  static void follow_root(ParCompactionManager* cm, oop* p);

  // Compute the dense prefix for the designated space.  This is an
  // experimental implementation currently not used in production.
  static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction);

  // Methods used to compute the dense prefix.

  // Compute the value of the normal distribution at x = density.  The mean
  // and standard deviation are values saved by initialize_dead_wood_limiter().
  static inline double normal_distribution(double density);

  // Initialize the static vars used by dead_wood_limiter().
  static void initialize_dead_wood_limiter();

  // Return the percentage of space that can be treated as "dead wood" (i.e.,
  // not reclaimed).
  static double dead_wood_limiter(double density, size_t min_percent);

  // Find the first (left-most) chunk in the range [beg, end) that has at
  // least dead_words of dead space to the left.  The argument beg must be the
  // first chunk in the space that is not completely live.
  static ChunkData* dead_wood_limit_chunk(const ChunkData* beg,
                                          const ChunkData* end,
                                          size_t dead_words);

  // Return a pointer to the first chunk in the range [beg, end) that is not
  // completely full.
  static ChunkData* first_dead_space_chunk(const ChunkData* beg,
                                           const ChunkData* end);

  // Return a value indicating the benefit or 'yield' if the compacted region
  // were to start (or equivalently if the dense prefix were to end) at the
  // candidate chunk.  Higher values are better.
  //
  // The value is based on the amount of space reclaimed vs. the costs of (a)
  // updating references in the dense prefix plus (b) copying objects and
  // updating references in the compacted region.
  static inline double reclaimed_ratio(const ChunkData* const candidate,
                                       HeapWord* const bottom,
                                       HeapWord* const top,
                                       HeapWord* const new_top);

  // Compute the dense prefix for the designated space.
  static HeapWord* compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction);

  // Return true if dead space crosses onto the specified Chunk; bit must be
  // the bit index corresponding to the first word of the Chunk.
  static inline bool dead_space_crosses_boundary(const ChunkData* chunk,
                                                 idx_t bit);

  // Summary phase utility routine to fill dead space (if any) at the dense
  // prefix boundary.  Should only be called if the dense prefix is non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  static bool block_first_offset(size_t block_index, idx_t* block_offset_ptr);

  // Fill in the BlockData.
  static void summarize_blocks(ParCompactionManager* cm,
                               SpaceId first_compaction_space_id);

  // The space that is compacted after space_id.
  static SpaceId next_compaction_space_id(SpaceId space_id);

  // Adjust addresses in roots.  Does not adjust addresses in heap.
  static void adjust_roots();

  // Serial code executed in preparation for the compaction phase.
  static void compact_prologue();

  // Move objects to new locations.
  static void compact_perm(ParCompactionManager* cm);
  static void compact();

  // Add available chunks to the stack and draining tasks to the task queue.
  static void enqueue_chunk_draining_tasks(GCTaskQueue* q,
                                           uint parallel_gc_threads);

  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add chunk stealing tasks to the task queue.
  static void enqueue_chunk_stealing_tasks(GCTaskQueue* q,
                                           ParallelTaskTerminator* terminator_ptr,
                                           uint parallel_gc_threads);

  // For debugging only - compacts the old gen serially.
  static void compact_serial(ParCompactionManager* cm);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen.  Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc.
  static void reset_millis_since_last_gc();

 protected:
#ifdef VALIDATE_MARK_SWEEP
  static GrowableArray<oop*>*   _root_refs_stack;
  static GrowableArray<oop>*    _live_oops;
  static GrowableArray<oop>*    _live_oops_moved_to;
  static GrowableArray<size_t>* _live_oops_size;
  static size_t                 _live_oops_index;
  static size_t                 _live_oops_index_at_perm;
  static GrowableArray<oop*>*   _other_refs_stack;
  static GrowableArray<oop*>*   _adjusted_pointers;
  static bool                   _pointer_tracking;
  static bool                   _root_tracking;

  // The following arrays are saved since the time of the last GC and
  // assist in tracking down problems where someone has done an errant
  // store into the heap, usually to an oop that wasn't properly
  // handleized across a GC.  If we crash or otherwise fail before the
  // next GC, we can query these arrays to find out the object we had
  // intended to do the store to (assuming it is still alive) and the
  // offset within that object.  Covered under RecordMarkSweepCompaction.
  static GrowableArray<HeapWord*>* _cur_gc_live_oops;
  static GrowableArray<HeapWord*>* _cur_gc_live_oops_moved_to;
  static GrowableArray<size_t>*    _cur_gc_live_oops_size;
  static GrowableArray<HeapWord*>* _last_gc_live_oops;
  static GrowableArray<HeapWord*>* _last_gc_live_oops_moved_to;
  static GrowableArray<size_t>*    _last_gc_live_oops_size;
#endif

 public:
  class MarkAndPushClosure: public OopClosure {
    ParCompactionManager* _compaction_manager;
   public:
    MarkAndPushClosure(ParCompactionManager* cm) {
      _compaction_manager = cm;
    }
    void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
    virtual const bool do_nmethods() const { return true; }
  };

  PSParallelCompact();

  // Convenient accessor for Universe::heap().
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static void invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations.  This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters()    { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static klassOop updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static bool mark_obj(oop* p) {
    if (*p != NULL) {
      return mark_obj(*p);
    } else {
      return false;
    }
  }
  static void mark_and_push(ParCompactionManager* cm, oop* p) {
    // Check mark and maybe push on marking stack.
    oop m = *p;
    if (m != NULL && mark_bitmap()->is_unmarked(m)) {
      mark_and_push_internal(cm, p);
    }
  }

  // Compaction support.
  // Return true if p is in the range [beg_addr, end_addr).
  static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);

  // Convenience wrappers for per-space data kept in _space_info.
  static inline MutableSpace*     space(SpaceId space_id);
  static inline HeapWord*         new_top(SpaceId space_id);
  static inline HeapWord*         dense_prefix(SpaceId space_id);
  static inline ObjectStartArray* start_array(SpaceId space_id);

  // Return true if the klass should be updated.
  static inline bool should_update_klass(klassOop k);

  // Move and update the live objects in the specified space.
  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);

  // Process the end of the given chunk range in the dense prefix.
  // This includes saving any object not updated.
  static void dense_prefix_chunks_epilogue(ParCompactionManager* cm,
                                           size_t chunk_start_index,
                                           size_t chunk_end_index,
                                           idx_t exiting_object_offset,
                                           idx_t chunk_offset_start,
                                           idx_t chunk_offset_end);

  // Update a chunk in the dense prefix.  For each live object in the chunk,
  // update its interior references.  For each dead object, fill it with
  // deadwood.  Dead space at the end of a chunk range will be filled to the
  // start of the next live object regardless of the chunk_index_end.  None of
  // the objects in the dense prefix move and dead space is dead (holds only
  // dead objects that don't need any processing), so dead space can be filled
  // in any order.
  static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
                                                  SpaceId space_id,
                                                  size_t chunk_index_start,
                                                  size_t chunk_index_end);

  // Return the address of the (count + 1)st live word in the range [beg, end).
  static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);

  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a chunk boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  size_t src_chunk_idx);

  // Determine the next source chunk, set closure.source() to the start of the
  // new chunk, and return the chunk index.  Parameter end_addr is the address
  // one beyond the end of the source range just processed.  If necessary,
  // switch to a new source space and set src_space_id (in-out parameter) and
  // src_space_top (out parameter) accordingly.
  static size_t next_src_chunk(MoveAndUpdateClosure& closure,
                               SpaceId& src_space_id,
                               HeapWord*& src_space_top,
                               HeapWord* end_addr);

  // Decrement the destination count for each non-empty source chunk in the
  // range [beg_chunk, chunk(chunk_align_up(end_addr))).
  static void decrement_destination_counts(ParCompactionManager* cm,
                                           size_t beg_chunk,
                                           HeapWord* end_addr);

  // Fill a chunk, copying objects from one or more source chunks.
  static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx);
  static void fill_and_update_chunk(ParCompactionManager* cm, size_t chunk) {
    fill_chunk(cm, chunk);
  }

  // Update the deferred objects in the space.
  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

  // Mark pointer and follow contents.
  static void mark_and_follow(ParCompactionManager* cm, oop* p);

  static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

  static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
  static inline void adjust_pointer(oop* p,
                                    HeapWord* beg_addr,
                                    HeapWord* end_addr);

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Return the SpaceId for the given address.
  static SpaceId space_id(HeapWord* addr);

  // Time since last full gc (in milliseconds).
  static jlong millis_since_last_gc();

#ifdef VALIDATE_MARK_SWEEP
  static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
  static void check_adjust_pointer(oop* p);  // Adjust this pointer.
  static void track_interior_pointers(oop obj);
  static void check_interior_pointers();

  static void reset_live_oop_tracking(bool at_perm);
  static void register_live_oop(oop p, size_t size);
  static void validate_live_oop(oop p, size_t size);
  static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
  static void compaction_complete();

  // Querying operation of RecordMarkSweepCompaction results.
  // Finds and prints the current base oop and offset for a word
  // within an oop that was live during the last GC.  Helpful for
  // tracking down heap stomps.
  static void print_new_location_of_heap_address(HeapWord* q);
#endif  // #ifdef VALIDATE_MARK_SWEEP

  // Call backs for class unloading.
  // Update subklass/sibling/implementor links at the end of marking.
  static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);

#ifndef PRODUCT
  // Debugging support.
  static const char* space_names[last_space_id];
  static void print_chunk_ranges();
  static void print_dense_prefix_stats(const char* const algorithm,
                                       const SpaceId id,
                                       const bool maximum_compaction,
                                       HeapWord* const addr);
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  // Verify that all the chunks have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};

bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}

inline bool PSParallelCompact::print_phases()
{
  return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density)
{
  assert(_dwl_initialized, "uninitialized");
  const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
}
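
// In other words, normal_distribution() evaluates
//   f(x) = _dwl_first_term * exp(-((x - _dwl_mean) / _dwl_std_dev)^2 / 2)
// which is the Gaussian density when _dwl_first_term carries the usual
// 1 / (_dwl_std_dev * sqrt(2 * pi)) normalization (presumably set up by
// initialize_dead_wood_limiter(); see psParallelCompact.cpp).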

inline bool
PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk,
                                               idx_t bit)
{
  assert(bit > 0, "cannot call this for the first bit/chunk");
  assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit),
         "sanity check");

  // Dead space crosses the boundary if (1) a partial object does not extend
  // onto the chunk, (2) an object does not start at the beginning of the
  // chunk, and (3) an object does not end at the end of the prior chunk.
  return chunk->partial_obj_size() == 0 &&
         !_mark_bitmap.is_obj_beg(bit) &&
         !_mark_bitmap.is_obj_end(bit - 1);
}

inline bool
PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return p >= beg_addr && p < end_addr;
}

inline bool
PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  return is_in((HeapWord*)p, beg_addr, end_addr);
}

inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].space();
}

inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].new_top();
}

inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].dense_prefix();
}

inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  assert(id < last_space_id, "id out of range");
  return _space_info[id].start_array();
}
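
// Note (added; reasoning inferred from the check below): objects below the
// perm-gen dense prefix do not move during the collection, so only klass
// pointers at or above the dense prefix can have a new location and need
// updating.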
inline bool PSParallelCompact::should_update_klass(klassOop k) {
  return ((HeapWord*) k) >= dense_prefix(perm_space_id);
}

inline void PSParallelCompact::adjust_pointer(oop* p,
                                              HeapWord* beg_addr,
                                              HeapWord* end_addr) {
  if (is_in(p, beg_addr, end_addr)) {
    adjust_pointer(p);
  }
}

class MoveAndUpdateClosure: public ParMarkBitMapClosure {
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
                              ObjectStartArray* start_array,
                              HeapWord* destination, size_t words);

  // Accessors.
  HeapWord* destination() const { return _destination; }

  // If the object will fit (size <= words_remaining()), copy it to the
  // current destination, update the interior oops and the start array and
  // return either full (if the closure is full) or incomplete.  If the object
  // will not fit, return would_overflow.
  virtual IterationStatus do_addr(HeapWord* addr, size_t size);

  // Copy enough words to fill this closure, starting at source().  Interior
  // oops and the start array are not updated.  Return full.
  IterationStatus copy_until_full();

  // Copy enough words to fill this closure or to the end of an object,
  // whichever is smaller, starting at source().  Interior oops and the start
  // array are not updated.
  void copy_partial_obj();

 protected:
  // Update variables to indicate that word_count words were processed.
  inline void update_state(size_t word_count);

 protected:
  ObjectStartArray* const _start_array;
  HeapWord*               _destination;  // Next addr to be written.
};

inline
MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
                                           ParCompactionManager* cm,
                                           ObjectStartArray* start_array,
                                           HeapWord* destination,
                                           size_t words) :
  ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
{
  _destination = destination;
}

inline void MoveAndUpdateClosure::update_state(size_t words)
{
  decrement_words_remaining(words);
  _source += words;
  _destination += words;
}
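
// Sketch of the do_addr() contract stated above (illustrative only; the real
// implementation, including the copy and interior oop updates, lives in
// psParallelCompact.cpp):
//
//   IterationStatus MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t size) {
//     if (size > words_remaining()) {
//       return ParMarkBitMap::would_overflow;  // object does not fit
//     }
//     // ... copy the object to destination(), update its interior oops and
//     // the start array, then account for the words just processed ...
//     update_state(size);
//     return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//   }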

class UpdateOnlyClosure: public ParMarkBitMapClosure {
 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;

 public:
  UpdateOnlyClosure(ParMarkBitMap* mbm,
                    ParCompactionManager* cm,
                    PSParallelCompact::SpaceId space_id);

  // Update the object.
  virtual IterationStatus do_addr(HeapWord* addr, size_t words);

  inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
  _start_array->allocate_block(addr);
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure {
 public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
    _space_id(space_id),
    _start_array(PSParallelCompact::start_array(space_id))
  {
    assert(_space_id == PSParallelCompact::perm_space_id ||
           _space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
    assert(bitmap() != NULL, "need a bitmap");
    assert(_start_array != NULL, "need a start array");
  }

  void fill_region(HeapWord* addr, size_t size) {
    MemRegion region(addr, size);
    SharedHeap::fill_region_with_object(region);
    _start_array->allocate_block(addr);
  }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
    fill_region(addr, size);
    return ParMarkBitMap::incomplete;
  }

 private:
  const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const          _start_array;
};
