141 class G1CollectedHeap : public SharedHeap { |
140 class G1CollectedHeap : public SharedHeap { |
142 friend class VM_G1CollectForAllocation; |
141 friend class VM_G1CollectForAllocation; |
143 friend class VM_GenCollectForPermanentAllocation; |
142 friend class VM_GenCollectForPermanentAllocation; |
144 friend class VM_G1CollectFull; |
143 friend class VM_G1CollectFull; |
145 friend class VM_G1IncCollectionPause; |
144 friend class VM_G1IncCollectionPause; |
146 friend class VM_G1PopRegionCollectionPause; |
|
147 friend class VMStructs; |
145 friend class VMStructs; |
148 |
146 |
149 // Closures used in implementation. |
147 // Closures used in implementation. |
150 friend class G1ParCopyHelper; |
148 friend class G1ParCopyHelper; |
151 friend class G1IsAliveClosure; |
149 friend class G1IsAliveClosure; |
250 bool check_gc_alloc_regions(); |
248 bool check_gc_alloc_regions(); |
251 |
249 |
252 // Outside of GC pauses, the number of bytes used in all regions other |
250 // Outside of GC pauses, the number of bytes used in all regions other |
253 // than the current allocation region. |
251 // than the current allocation region. |
254 size_t _summary_bytes_used; |
252 size_t _summary_bytes_used; |
255 |
|
256 // Summary information about popular objects; method to print it. |
|
257 NumberSeq _pop_obj_rc_at_copy; |
|
258 void print_popularity_summary_info() const; |
|
259 |
253 |
260 // This is used for a quick test on whether a reference points into |
254 // This is used for a quick test on whether a reference points into |
261 // the collection set or not. Basically, we have an array, with one |
255 // the collection set or not. Basically, we have an array, with one |
262 // byte per region, and that byte denotes whether the corresponding |
256 // byte per region, and that byte denotes whether the corresponding |
263 // region is in the collection set or not. The entry corresponding |
257 // region is in the collection set or not. The entry corresponding |
445 // Do an incremental collection: identify a collection set, and evacuate |
439 // Do an incremental collection: identify a collection set, and evacuate |
446 // its live objects elsewhere. |
440 // its live objects elsewhere. |
447 virtual void do_collection_pause(); |
441 virtual void do_collection_pause(); |
448 |
442 |
449 // The guts of the incremental collection pause, executed by the vm |
443 // The guts of the incremental collection pause, executed by the vm |
450 // thread. If "popular_region" is non-NULL, this pause should evacuate |
444 // thread. |
451 // this single region whose remembered set has gotten large, moving |
445 virtual void do_collection_pause_at_safepoint(); |
452 // any popular objects to one of the popular regions. |
|
453 virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region); |
|
454 |
446 |
455 // Actually do the work of evacuating the collection set. |
447 // Actually do the work of evacuating the collection set. |
456 virtual void evacuate_collection_set(); |
448 virtual void evacuate_collection_set(); |
457 |
449 |
458 // If this is an appropriate right time, do a collection pause. |
450 // If this is an appropriate right time, do a collection pause. |
623 G1H_PS_NumElements |
615 G1H_PS_NumElements |
624 }; |
616 }; |
625 |
617 |
626 SubTasksDone* _process_strong_tasks; |
618 SubTasksDone* _process_strong_tasks; |
627 |
619 |
628 // Allocate space to hold a popular object. Result is guaranteed to be below |
|
629 // "popular_object_boundary()". Note: CURRENTLY halts the system if we |
|
630 // run out of space to hold popular objects. |
|
631 HeapWord* allocate_popular_object(size_t word_size); |
|
632 |
|
633 // The boundary between popular and non-popular objects. |
|
634 HeapWord* _popular_object_boundary; |
|
635 |
|
636 HeapRegionList* _popular_regions_to_be_evacuated; |
|
637 |
|
638 // Compute which objects in "single_region" are popular. If any are, |
|
639 // evacuate them to a popular region, leaving behind forwarding pointers, |
|
640 // and select "popular_region" as the single collection set region. |
|
641 // Otherwise, leave the collection set null. |
|
642 void popularity_pause_preamble(HeapRegion* populer_region); |
|
643 |
|
644 // Compute which objects in "single_region" are popular, and evacuate |
|
645 // them to a popular region, leaving behind forwarding pointers. |
|
646 // Returns "true" if at least one popular object is discovered and |
|
647 // evacuated. In any case, "*max_rc" is set to the maximum reference |
|
648 // count of an object in the region. |
|
649 bool compute_reference_counts_and_evac_popular(HeapRegion* populer_region, |
|
650 size_t* max_rc); |
|
651 // Subroutines used in the above. |
|
652 bool _rc_region_above; |
|
653 size_t _rc_region_diff; |
|
654 jint* obj_rc_addr(oop obj) { |
|
655 uintptr_t obj_addr = (uintptr_t)obj; |
|
656 if (_rc_region_above) { |
|
657 jint* res = (jint*)(obj_addr + _rc_region_diff); |
|
658 assert((uintptr_t)res > obj_addr, "RC region is above."); |
|
659 return res; |
|
660 } else { |
|
661 jint* res = (jint*)(obj_addr - _rc_region_diff); |
|
662 assert((uintptr_t)res < obj_addr, "RC region is below."); |
|
663 return res; |
|
664 } |
|
665 } |
|
666 jint obj_rc(oop obj) { |
|
667 return *obj_rc_addr(obj); |
|
668 } |
|
669 void inc_obj_rc(oop obj) { |
|
670 (*obj_rc_addr(obj))++; |
|
671 } |
|
672 void atomic_inc_obj_rc(oop obj); |
|
673 |
|
674 |
|
675 // Number of popular objects and bytes (latter is cheaper!). |
|
676 size_t pop_object_used_objs(); |
|
677 size_t pop_object_used_bytes(); |
|
678 |
|
679 // Index of the popular region in which allocation is currently being |
|
680 // done. |
|
681 int _cur_pop_hr_index; |
|
682 |
|
683 // List of regions which require zero filling. |
620 // List of regions which require zero filling. |
684 UncleanRegionList _unclean_region_list; |
621 UncleanRegionList _unclean_region_list; |
685 bool _unclean_regions_coming; |
622 bool _unclean_regions_coming; |
686 |
|
687 bool check_age_cohort_well_formed_work(int a, HeapRegion* hr); |
|
688 |
623 |
689 public: |
624 public: |
690 void set_refine_cte_cl_concurrency(bool concurrent); |
625 void set_refine_cte_cl_concurrency(bool concurrent); |
691 |
626 |
692 RefToScanQueue *task_queue(int i); |
627 RefToScanQueue *task_queue(int i); |
1064 |
999 |
1065 // The boundary between a "large" and "small" array of primitives, in |
1000 // The boundary between a "large" and "small" array of primitives, in |
1066 // words. |
1001 // words. |
1067 virtual size_t large_typearray_limit(); |
1002 virtual size_t large_typearray_limit(); |
1068 |
1003 |
1069 // All popular objects are guaranteed to have addresses below this |
|
1070 // boundary. |
|
1071 HeapWord* popular_object_boundary() { |
|
1072 return _popular_object_boundary; |
|
1073 } |
|
1074 |
|
1075 // Declare the region as one that should be evacuated because its |
|
1076 // remembered set is too large. |
|
1077 void schedule_popular_region_evac(HeapRegion* r); |
|
1078 // If there is a popular region to evacuate, remove it from the list |
|
1079 // and return it. |
|
1080 HeapRegion* popular_region_to_evac(); |
|
1081 // Evacuate the given popular region. |
|
1082 void evac_popular_region(HeapRegion* r); |
|
1083 |
|
1084 // Returns "true" iff the given word_size is "very large". |
1004 // Returns "true" iff the given word_size is "very large". |
1085 static bool isHumongous(size_t word_size) { |
1005 static bool isHumongous(size_t word_size) { |
1086 return word_size >= VeryLargeInWords; |
1006 return word_size >= VeryLargeInWords; |
1087 } |
1007 } |
1088 |
1008 |