218 // Mask for the bits in a pointer to get an offset within a region. |
218 // Mask for the bits in a pointer to get an offset within a region. |
219 static const size_t RegionAddrOffsetMask; |
219 static const size_t RegionAddrOffsetMask; |
220 // Mask for the bits in a pointer to get the address of the start of a region. |
220 // Mask for the bits in a pointer to get the address of the start of a region. |
221 static const size_t RegionAddrMask; |
221 static const size_t RegionAddrMask; |
222 |
222 |
|
223 static const size_t Log2BlockSize; |
|
224 static const size_t BlockSize; |
|
225 static const size_t BlockSizeBytes; |
|
226 |
|
227 static const size_t BlockSizeOffsetMask; |
|
228 static const size_t BlockAddrOffsetMask; |
|
229 static const size_t BlockAddrMask; |
|
230 |
|
231 static const size_t BlocksPerRegion; |
|
232 static const size_t Log2BlocksPerRegion; |
|
233 |
223 class RegionData |
234 class RegionData |
224 { |
235 { |
225 public: |
236 public: |
226 // Destination address of the region. |
237 // Destination address of the region. |
227 HeapWord* destination() const { return _destination; } |
238 HeapWord* destination() const { return _destination; } |
270 // been filled, the destination_count should be set to the completed value |
281 // been filled, the destination_count should be set to the completed value |
271 // (dc_completed). |
282 // (dc_completed). |
272 inline uint destination_count() const; |
283 inline uint destination_count() const; |
273 inline uint destination_count_raw() const; |
284 inline uint destination_count_raw() const; |
274 |
285 |
|
286 // Whether the block table for this region has been filled. |
|
287 inline bool blocks_filled() const; |
|
288 |
|
289 // Number of times the block table was filled. |
|
290 DEBUG_ONLY(inline size_t blocks_filled_count() const;) |
|
291 |
275 // The location of the java heap data that corresponds to this region. |
292 // The location of the java heap data that corresponds to this region. |
276 inline HeapWord* data_location() const; |
293 inline HeapWord* data_location() const; |
277 |
294 |
278 // The highest address referenced by objects in this region. |
295 // The highest address referenced by objects in this region. |
279 inline HeapWord* highest_ref() const; |
296 inline HeapWord* highest_ref() const; |
294 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } |
311 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } |
295 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } |
312 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; } |
296 void set_partial_obj_size(size_t words) { |
313 void set_partial_obj_size(size_t words) { |
297 _partial_obj_size = (region_sz_t) words; |
314 _partial_obj_size = (region_sz_t) words; |
298 } |
315 } |
|
316 inline void set_blocks_filled(); |
299 |
317 |
300 inline void set_destination_count(uint count); |
318 inline void set_destination_count(uint count); |
301 inline void set_live_obj_size(size_t words); |
319 inline void set_live_obj_size(size_t words); |
302 inline void set_data_location(HeapWord* addr); |
320 inline void set_data_location(HeapWord* addr); |
303 inline void set_completed(); |
321 inline void set_completed(); |
326 HeapWord* _destination; |
344 HeapWord* _destination; |
327 size_t _source_region; |
345 size_t _source_region; |
328 HeapWord* _partial_obj_addr; |
346 HeapWord* _partial_obj_addr; |
329 region_sz_t _partial_obj_size; |
347 region_sz_t _partial_obj_size; |
330 region_sz_t volatile _dc_and_los; |
348 region_sz_t volatile _dc_and_los; |
|
349 bool _blocks_filled; |
|
350 |
331 #ifdef ASSERT |
351 #ifdef ASSERT |
|
352 size_t _blocks_filled_count; // Number of block table fills. |
|
353 |
332 // These enable optimizations that are only partially implemented. Use |
354 // These enable optimizations that are only partially implemented. Use |
333 // debug builds to prevent the code fragments from breaking. |
355 // debug builds to prevent the code fragments from breaking. |
334 HeapWord* _data_location; |
356 HeapWord* _data_location; |
335 HeapWord* _highest_ref; |
357 HeapWord* _highest_ref; |
336 #endif // #ifdef ASSERT |
358 #endif // #ifdef ASSERT |
337 |
359 |
338 #ifdef ASSERT |
360 #ifdef ASSERT |
339 public: |
361 public: |
340 uint _pushed; // 0 until region is pushed onto a worker's stack |
362 uint _pushed; // 0 until region is pushed onto a stack |
341 private: |
363 private: |
342 #endif |
364 #endif |
343 }; |
365 }; |
344 |
366 |
|
367 // "Blocks" allow shorter sections of the bitmap to be searched. Each Block |
|
368 // holds an offset, which is the amount of live data in the Region to the left |
|
369 // of the first live object that starts in the Block. |
|
370 class BlockData |
|
371 { |
|
372 public: |
|
373 typedef unsigned short int blk_ofs_t; |
|
374 |
|
375 blk_ofs_t offset() const { return _offset; } |
|
376 void set_offset(size_t val) { _offset = (blk_ofs_t)val; } |
|
377 |
|
378 private: |
|
379 blk_ofs_t _offset; |
|
380 }; |
|
381 |
345 public: |
382 public: |
346 ParallelCompactData(); |
383 ParallelCompactData(); |
347 bool initialize(MemRegion covered_region); |
384 bool initialize(MemRegion covered_region); |
348 |
385 |
349 size_t region_count() const { return _region_count; } |
386 size_t region_count() const { return _region_count; } |
351 |
388 |
352 // Convert region indices to/from RegionData pointers. |
389 // Convert region indices to/from RegionData pointers. |
353 inline RegionData* region(size_t region_idx) const; |
390 inline RegionData* region(size_t region_idx) const; |
354 inline size_t region(const RegionData* const region_ptr) const; |
391 inline size_t region(const RegionData* const region_ptr) const; |
355 |
392 |
356 // Returns true if the given address is contained within the region |
393 size_t block_count() const { return _block_count; } |
357 bool region_contains(size_t region_index, HeapWord* addr); |
394 inline BlockData* block(size_t block_idx) const; |
|
395 inline size_t block(const BlockData* block_ptr) const; |
358 |
396 |
359 void add_obj(HeapWord* addr, size_t len); |
397 void add_obj(HeapWord* addr, size_t len); |
360 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } |
398 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); } |
361 |
399 |
362 // Fill in the regions covering [beg, end) so that no data moves; i.e., the |
400 // Fill in the regions covering [beg, end) so that no data moves; i.e., the |
392 |
430 |
393 inline HeapWord* region_align_down(HeapWord* addr) const; |
431 inline HeapWord* region_align_down(HeapWord* addr) const; |
394 inline HeapWord* region_align_up(HeapWord* addr) const; |
432 inline HeapWord* region_align_up(HeapWord* addr) const; |
395 inline bool is_region_aligned(HeapWord* addr) const; |
433 inline bool is_region_aligned(HeapWord* addr) const; |
396 |
434 |
|
435 // Analogous to region_offset() for blocks. |
|
436 size_t block_offset(const HeapWord* addr) const; |
|
437 size_t addr_to_block_idx(const HeapWord* addr) const; |
|
438 size_t addr_to_block_idx(const oop obj) const { |
|
439 return addr_to_block_idx((HeapWord*) obj); |
|
440 } |
|
441 inline BlockData* addr_to_block_ptr(const HeapWord* addr) const; |
|
442 inline HeapWord* block_to_addr(size_t block) const; |
|
443 inline size_t region_to_block_idx(size_t region) const; |
|
444 |
|
445 inline HeapWord* block_align_down(HeapWord* addr) const; |
|
446 inline HeapWord* block_align_up(HeapWord* addr) const; |
|
447 inline bool is_block_aligned(HeapWord* addr) const; |
|
448 |
397 // Return the address one past the end of the partial object. |
449 // Return the address one past the end of the partial object. |
398 HeapWord* partial_obj_end(size_t region_idx) const; |
450 HeapWord* partial_obj_end(size_t region_idx) const; |
399 |
451 |
400 // Return the new location of the object p after the |
452 // Return the location of the object after compaction. |
401 // the compaction. |
|
402 HeapWord* calc_new_pointer(HeapWord* addr); |
453 HeapWord* calc_new_pointer(HeapWord* addr); |
403 |
454 |
404 HeapWord* calc_new_pointer(oop p) { |
455 HeapWord* calc_new_pointer(oop p) { |
405 return calc_new_pointer((HeapWord*) p); |
456 return calc_new_pointer((HeapWord*) p); |
406 } |
457 } |
409 void verify_clear(const PSVirtualSpace* vspace); |
460 void verify_clear(const PSVirtualSpace* vspace); |
410 void verify_clear(); |
461 void verify_clear(); |
411 #endif // #ifdef ASSERT |
462 #endif // #ifdef ASSERT |
412 |
463 |
413 private: |
464 private: |
|
465 bool initialize_block_data(); |
414 bool initialize_region_data(size_t region_size); |
466 bool initialize_region_data(size_t region_size); |
415 PSVirtualSpace* create_vspace(size_t count, size_t element_size); |
467 PSVirtualSpace* create_vspace(size_t count, size_t element_size); |
416 |
468 |
417 private: |
469 private: |
418 HeapWord* _region_start; |
470 HeapWord* _region_start; |
422 |
474 |
423 PSVirtualSpace* _region_vspace; |
475 PSVirtualSpace* _region_vspace; |
424 size_t _reserved_byte_size; |
476 size_t _reserved_byte_size; |
425 RegionData* _region_data; |
477 RegionData* _region_data; |
426 size_t _region_count; |
478 size_t _region_count; |
|
479 |
|
480 PSVirtualSpace* _block_vspace; |
|
481 BlockData* _block_data; |
|
482 size_t _block_count; |
427 }; |
483 }; |
428 |
484 |
429 inline uint |
485 inline uint |
430 ParallelCompactData::RegionData::destination_count_raw() const |
486 ParallelCompactData::RegionData::destination_count_raw() const |
431 { |
487 { |
434 |
490 |
435 inline uint |
491 inline uint |
436 ParallelCompactData::RegionData::destination_count() const |
492 ParallelCompactData::RegionData::destination_count() const |
437 { |
493 { |
438 return destination_count_raw() >> dc_shift; |
494 return destination_count_raw() >> dc_shift; |
|
495 } |
|
496 |
|
497 inline bool |
|
498 ParallelCompactData::RegionData::blocks_filled() const |
|
499 { |
|
500 return _blocks_filled; |
|
501 } |
|
502 |
|
#ifdef ASSERT
// Debug-only: how many times the block table for this region was filled.
inline size_t ParallelCompactData::RegionData::blocks_filled_count() const {
  return _blocks_filled_count;
}
#endif // #ifdef ASSERT
|
510 |
|
511 inline void |
|
512 ParallelCompactData::RegionData::set_blocks_filled() |
|
513 { |
|
514 _blocks_filled = true; |
|
515 // Debug builds count the number of times the table was filled. |
|
516 DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count)); |
439 } |
517 } |
440 |
518 |
441 inline void |
519 inline void |
442 ParallelCompactData::RegionData::set_destination_count(uint count) |
520 ParallelCompactData::RegionData::set_destination_count(uint count) |
443 { |
521 { |
530 assert(region_ptr >= _region_data, "bad arg"); |
608 assert(region_ptr >= _region_data, "bad arg"); |
531 assert(region_ptr <= _region_data + region_count(), "bad arg"); |
609 assert(region_ptr <= _region_data + region_count(), "bad arg"); |
532 return pointer_delta(region_ptr, _region_data, sizeof(RegionData)); |
610 return pointer_delta(region_ptr, _region_data, sizeof(RegionData)); |
533 } |
611 } |
534 |
612 |
|
613 inline ParallelCompactData::BlockData* |
|
614 ParallelCompactData::block(size_t n) const { |
|
615 assert(n < block_count(), "bad arg"); |
|
616 return _block_data + n; |
|
617 } |
|
618 |
535 inline size_t |
619 inline size_t |
536 ParallelCompactData::region_offset(const HeapWord* addr) const |
620 ParallelCompactData::region_offset(const HeapWord* addr) const |
537 { |
621 { |
538 assert(addr >= _region_start, "bad addr"); |
622 assert(addr >= _region_start, "bad addr"); |
539 assert(addr <= _region_end, "bad addr"); |
623 assert(addr <= _region_end, "bad addr"); |
594 |
678 |
595 inline bool |
679 inline bool |
596 ParallelCompactData::is_region_aligned(HeapWord* addr) const |
680 ParallelCompactData::is_region_aligned(HeapWord* addr) const |
597 { |
681 { |
598 return region_offset(addr) == 0; |
682 return region_offset(addr) == 0; |
|
683 } |
|
684 |
|
685 inline size_t |
|
686 ParallelCompactData::block_offset(const HeapWord* addr) const |
|
687 { |
|
688 assert(addr >= _region_start, "bad addr"); |
|
689 assert(addr <= _region_end, "bad addr"); |
|
690 return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize; |
|
691 } |
|
692 |
|
693 inline size_t |
|
694 ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const |
|
695 { |
|
696 assert(addr >= _region_start, "bad addr"); |
|
697 assert(addr <= _region_end, "bad addr"); |
|
698 return pointer_delta(addr, _region_start) >> Log2BlockSize; |
|
699 } |
|
700 |
|
701 inline ParallelCompactData::BlockData* |
|
702 ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const |
|
703 { |
|
704 return block(addr_to_block_idx(addr)); |
|
705 } |
|
706 |
|
707 inline HeapWord* |
|
708 ParallelCompactData::block_to_addr(size_t block) const |
|
709 { |
|
710 assert(block < _block_count, "block out of range"); |
|
711 return _region_start + (block << Log2BlockSize); |
|
712 } |
|
713 |
|
714 inline size_t |
|
715 ParallelCompactData::region_to_block_idx(size_t region) const |
|
716 { |
|
717 return region << Log2BlocksPerRegion; |
|
718 } |
|
719 |
|
720 inline HeapWord* |
|
721 ParallelCompactData::block_align_down(HeapWord* addr) const |
|
722 { |
|
723 assert(addr >= _region_start, "bad addr"); |
|
724 assert(addr < _region_end + RegionSize, "bad addr"); |
|
725 return (HeapWord*)(size_t(addr) & BlockAddrMask); |
|
726 } |
|
727 |
|
728 inline HeapWord* |
|
729 ParallelCompactData::block_align_up(HeapWord* addr) const |
|
730 { |
|
731 assert(addr >= _region_start, "bad addr"); |
|
732 assert(addr <= _region_end, "bad addr"); |
|
733 return block_align_down(addr + BlockSizeOffsetMask); |
|
734 } |
|
735 |
|
736 inline bool |
|
737 ParallelCompactData::is_block_aligned(HeapWord* addr) const |
|
738 { |
|
739 return block_offset(addr) == 0; |
599 } |
740 } |
600 |
741 |
601 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the |
742 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the |
602 // do_addr() method. |
743 // do_addr() method. |
603 // |
744 // |
773 class PSParallelCompact : AllStatic { |
914 class PSParallelCompact : AllStatic { |
774 public: |
915 public: |
775 // Convenient access to type names. |
916 // Convenient access to type names. |
776 typedef ParMarkBitMap::idx_t idx_t; |
917 typedef ParMarkBitMap::idx_t idx_t; |
777 typedef ParallelCompactData::RegionData RegionData; |
918 typedef ParallelCompactData::RegionData RegionData; |
|
919 typedef ParallelCompactData::BlockData BlockData; |
778 |
920 |
779 typedef enum { |
921 typedef enum { |
780 old_space_id, eden_space_id, |
922 old_space_id, eden_space_id, |
781 from_space_id, to_space_id, last_space_id |
923 from_space_id, to_space_id, last_space_id |
782 } SpaceId; |
924 } SpaceId; |
960 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); |
1102 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); |
961 |
1103 |
962 // Adjust addresses in roots. Does not adjust addresses in heap. |
1104 // Adjust addresses in roots. Does not adjust addresses in heap. |
963 static void adjust_roots(); |
1105 static void adjust_roots(); |
964 |
1106 |
|
1107 DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);) |
|
1108 |
965 // Move objects to new locations. |
1109 // Move objects to new locations. |
966 static void compact_perm(ParCompactionManager* cm); |
1110 static void compact_perm(ParCompactionManager* cm); |
967 static void compact(); |
1111 static void compact(); |
968 |
1112 |
969 // Add available regions to the stack and draining tasks to the task queue. |
1113 // Add available regions to the stack and draining tasks to the task queue. |
1125 // Fill a region, copying objects from one or more source regions. |
1269 // Fill a region, copying objects from one or more source regions. |
1126 static void fill_region(ParCompactionManager* cm, size_t region_idx); |
1270 static void fill_region(ParCompactionManager* cm, size_t region_idx); |
1127 static void fill_and_update_region(ParCompactionManager* cm, size_t region) { |
1271 static void fill_and_update_region(ParCompactionManager* cm, size_t region) { |
1128 fill_region(cm, region); |
1272 fill_region(cm, region); |
1129 } |
1273 } |
|
1274 |
|
1275 // Fill in the block table for the specified region. |
|
1276 static void fill_blocks(size_t region_idx); |
1130 |
1277 |
1131 // Update the deferred objects in the space. |
1278 // Update the deferred objects in the space. |
1132 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id); |
1279 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id); |
1133 |
1280 |
1134 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } |
1281 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } |