src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset:  3268:8aae2050e83e
parent:     3219:c6a6e936dc68
child:      3269:53074c2c4600

diff -r ed80554efa25 -r 8aae2050e83e src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1201,10 +1201,11 @@
 
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                            collector_policy()->should_clear_all_soft_refs();
 
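The one functional change in this hunk is the new HRSPhaseSetter local. Declaring it as a named stack object at the top of the full-GC entry point (and again, as HRSPhaseEvacuation, in the evacuation-pause hunk further down) suggests an RAII scope guard that records which heap-region-set phase is active while the collection runs. A minimal standalone sketch of that pattern, assuming set-on-construct / restore-on-destruct semantics; apart from the two phase names taken from the diff, every identifier below is illustrative:

#include <cassert>

enum HRSPhase { HRSPhaseNone, HRSPhaseFullGC, HRSPhaseEvacuation };

static HRSPhase g_current_phase = HRSPhaseNone;

class HRSPhaseSetter {
  HRSPhase _prev;

public:
  explicit HRSPhaseSetter(HRSPhase phase) : _prev(g_current_phase) {
    g_current_phase = phase;   // region-set code can now ask which phase is running
  }
  ~HRSPhaseSetter() {
    g_current_phase = _prev;   // restored automatically on scope exit
  }
};

void do_collection_sketch() {
  HRSPhaseSetter x(HRSPhaseFullGC);   // mirrors the inserted line
  assert(g_current_phase == HRSPhaseFullGC);
  // ... verification and collection work happen inside this scope ...
}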
@@ -1261,11 +1262,10 @@
 
     // Make sure we'll choose a new allocation region afterwards.
     release_mutator_alloc_region();
     abandon_gc_alloc_regions();
     g1_rem_set()->cleanupHRRS();
-    tear_down_region_lists();
 
     // We should call this after we retire any currently active alloc
     // regions so that all the ALLOC / RETIRE events are generated
     // before the start GC event.
     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
@@ -1276,11 +1276,11 @@
     // after this full GC.
     abandon_collection_set(g1_policy()->inc_cset_head());
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
 
-    empty_young_list();
+    tear_down_region_sets(false /* free_list_only */);
     g1_policy()->set_full_young_gcs(true);
 
     // See the comments in g1CollectedHeap.hpp and
     // G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
@@ -1299,13 +1299,11 @@
       HandleMark hm;  // Discard invalid handles created during gc
       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
     }
 
     assert(free_regions() == 0, "we should not have added any free regions");
-    rebuild_region_lists();
-
-    _summary_bytes_used = recalculate_used();
+    rebuild_region_sets(false /* free_list_only */);
 
     // Enqueue any discovered reference objects that have
     // not been removed from the discovered lists.
     ref_processor_stw()->enqueue_discovered_references();
 
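Two full-GC steps visible in the old code disappear here rather than move: empty_young_list() is subsumed by tear_down_region_sets(false /* free_list_only */), which empties the young list after its heap walk, and the explicit _summary_bytes_used = recalculate_used() is subsumed by rebuild_region_sets(false /* free_list_only */), which re-derives the used-bytes total from the regions it visits and asserts it against recalculate_used() (see the new closures near the end of this changeset). The shrink path below instead passes true, so only the free list is torn down and rebuilt. A standalone sketch of that bracketing pattern; all types and fields here are illustrative stand-ins, not HotSpot's:

#include <cassert>
#include <cstddef>
#include <vector>

struct Region { size_t used; bool free; };

struct RegionSets {
  std::vector<Region*> old_set;    // stand-in for _old_set
  size_t summary_bytes_used;       // stand-in for _summary_bytes_used

  RegionSets() : summary_bytes_used(0) { }

  void tear_down() {
    old_set.clear();               // like tear_down_region_sets(false)
  }

  void rebuild(std::vector<Region>& heap) {
    size_t total = 0;
    for (size_t i = 0; i < heap.size(); ++i) {
      if (heap[i].free) continue;  // free regions belong to the free list
      old_set.push_back(&heap[i]); // after a full GC everything live is old
      total += heap[i].used;
    }
    summary_bytes_used = total;    // re-derived, like cl.total_used()
  }
};

void full_gc(std::vector<Region>& heap, RegionSets& sets) {
  sets.tear_down();
  // ... the compacting collection mutates the regions here ...
  sets.rebuild(heap);

  size_t check = 0;                // the new code's cross-check assert
  for (size_t i = 0; i < heap.size(); ++i) {
    if (!heap[i].free) check += heap[i].used;
  }
  assert(sets.summary_bytes_used == check);
  (void)check;
}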
@@ -1762,13 +1760,13 @@
   abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
-  tear_down_region_lists();  // We will rebuild them in a moment.
+  tear_down_region_sets(true /* free_list_only */);
   shrink_helper(shrink_bytes);
-  rebuild_region_lists();
+  rebuild_region_sets(true /* free_list_only */);
 
   _hrs.verify_optional();
   verify_region_sets_optional();
 }
 
@@ -1797,10 +1795,11 @@
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _free_list("Master Free List"),
   _secondary_free_list("Secondary Free List"),
+  _old_set("Old Set"),
   _humongous_set("Master Humongous Set"),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _retained_old_gc_alloc_region(NULL),
@@ -3350,10 +3349,11 @@
 
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
+  HRSPhaseSetter x(HRSPhaseEvacuation);
   verify_region_sets_optional();
   verify_dirty_young_regions();
 
   {
     // This call will decide whether this pause is an initial-mark
@@ -3772,10 +3772,15 @@
       !retained_region->in_collection_set() &&
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
     retained_region->set_saved_mark();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _old_set.remove(retained_region);
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
   }
 }
 
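The new comment states the invariant directly: a region the collector is actively allocating into must not sit in a region set, so the retained old GC alloc region is pulled out of _old_set here and re-added when it is retired (the retirement side is visible in the retire_gc_alloc_region hunk below). A toy model of that remove-on-reuse / add-on-retire discipline; all names are illustrative stand-ins:

#include <cassert>
#include <cstddef>
#include <set>

struct Region { };

struct OldSet {
  std::set<Region*> regions;

  void add(Region* r) { regions.insert(r); }

  void remove(Region* r) {
    size_t erased = regions.erase(r);
    assert(erased == 1 && "region should have been in the old set");
    (void)erased;
  }
};

struct OldGCAllocRegion {
  OldSet* _old_set;
  Region* _current;

  explicit OldGCAllocRegion(OldSet* s) : _old_set(s), _current(NULL) { }

  // Mirrors the new code: leave the set before becoming the active
  // allocation region...
  void reuse_retained(Region* retained) {
    _old_set->remove(retained);
    _current = retained;
  }

  // ...and rejoin it on retirement, as the comment promises.
  void retire() {
    _old_set->add(_current);
    _current = NULL;
  }
};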
@@ -5336,18 +5341,20 @@
 }
 
 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
                                            size_t* pre_used,
                                            FreeRegionList* free_list,
+                                           OldRegionSet* old_proxy_set,
                                            HumongousRegionSet* humongous_proxy_set,
                                            HRRSCleanupTask* hrrs_cleanup_task,
                                            bool par) {
   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
     if (hr->isHumongous()) {
       assert(hr->startsHumongous(), "we should only see starts humongous");
       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
     } else {
+      _old_set.remove_with_proxy(hr, old_proxy_set);
       free_region(hr, pre_used, free_list, par);
     }
   } else {
     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
   }
@@ -5400,10 +5407,11 @@
   *pre_used += hr_pre_used;
 }
 
 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
                                                         FreeRegionList* free_list,
+                                                        OldRegionSet* old_proxy_set,
                                                         HumongousRegionSet* humongous_proxy_set,
                                                         bool par) {
   if (pre_used > 0) {
     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
@@ -5414,10 +5422,14 @@
     _summary_bytes_used -= pre_used;
   }
   if (free_list != NULL && !free_list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
     _free_list.add_as_head(free_list);
+  }
+  if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
+    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
+    _old_set.update_from_proxy(old_proxy_set);
   }
   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _humongous_set.update_from_proxy(humongous_proxy_set);
   }
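The old set gets the same proxy treatment the humongous set already had: a parallel cleanup worker records removals in a private old_proxy_set via remove_with_proxy() without touching the master set, and update_sets_after_freeing_regions() later folds each proxy into _old_set under OldSets_lock, one lock acquisition per worker instead of one per region; serial callers simply pass NULL. A standalone sketch of that accumulate-then-publish idea, assuming the proxy holds pending removals; names and semantics here are illustrative, not the HotSpot API:

#include <cassert>
#include <cstddef>
#include <mutex>
#include <set>

struct Region { };

struct OldSet {
  std::set<Region*> regions;
  std::mutex old_sets_lock;          // plays the role of OldSets_lock

  // Worker side: lock-free, the removal is only recorded in the proxy.
  void remove_with_proxy(Region* r, std::set<Region*>* proxy) {
    proxy->insert(r);
  }

  // Publish side: one lock acquisition applies a worker's whole batch.
  void update_from_proxy(std::set<Region*>* proxy) {
    std::lock_guard<std::mutex> g(old_sets_lock);
    for (std::set<Region*>::iterator it = proxy->begin();
         it != proxy->end(); ++it) {
      size_t erased = regions.erase(*it);
      assert(erased == 1 && "proxied region should be in the master set");
      (void)erased;
    }
    proxy->clear();
  }
};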
@@ -5612,10 +5624,12 @@
       cur->uninstall_surv_rate_group();
       if (cur->is_young())
         cur->set_young_index_in_cset(-1);
       cur->set_not_young();
       cur->set_evacuation_failed(false);
+      // The region is now considered to be old.
+      _old_set.add(cur);
     }
     cur = next;
   }
 
   policy->record_max_rs_lengths(rs_lengths);
@@ -5627,10 +5641,11 @@
     non_young_time_ms += elapsed_ms;
   else
     young_time_ms += elapsed_ms;
 
   update_sets_after_freeing_regions(pre_used, &local_free_list,
                                     NULL /* old_proxy_set */,
                                     NULL /* humongous_proxy_set */,
                                     false /* par */);
   policy->record_young_free_cset_time_ms(young_time_ms);
   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
@@ -5738,56 +5753,110 @@
   }
 
   return ret;
 }
 
-void G1CollectedHeap::empty_young_list() {
-  assert(heap_lock_held_for_gc(),
-         "the heap lock should already be held by or for this thread");
-
-  _young_list->empty_list();
-}
-
-// Done at the start of full GC.
-void G1CollectedHeap::tear_down_region_lists() {
+class TearDownRegionSetsClosure : public HeapRegionClosure {
+private:
+  OldRegionSet* _old_set;
+
+public:
+  TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (r->is_empty()) {
+      // We ignore empty regions, we'll empty the free list afterwards
+    } else if (r->is_young()) {
+      // We ignore young regions, we'll empty the young list afterwards
+    } else if (r->isHumongous()) {
+      // We ignore humongous regions, we're not tearing down the
+      // humongous region set
+    } else {
+      // The rest should be old
+      _old_set->remove(r);
+    }
+    return false;
+  }
+
+  ~TearDownRegionSetsClosure() {
+    assert(_old_set->is_empty(), "post-condition");
+  }
+};
+
+void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  if (!free_list_only) {
+    TearDownRegionSetsClosure cl(&_old_set);
+    heap_region_iterate(&cl);
+
+    // Need to do this after the heap iteration to be able to
+    // recognize the young regions and ignore them during the iteration.
+    _young_list->empty_list();
+  }
   _free_list.remove_all();
 }
 
-class RegionResetter: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  FreeRegionList _local_free_list;
-
-public:
-  RegionResetter() : _g1h(G1CollectedHeap::heap()),
-                     _local_free_list("Local Free List for RegionResetter") { }
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    if (r->top() > r->bottom()) {
-      if (r->top() < r->end()) {
-        Copy::fill_to_words(r->top(),
-                            pointer_delta(r->end(), r->top()));
-      }
-    } else {
-      assert(r->is_empty(), "tautology");
-      _local_free_list.add_as_tail(r);
-    }
-    return false;
-  }
-
-  void update_free_lists() {
-    _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
-                                            false /* par */);
-  }
-};
-
-// Done at the end of full GC.
-void G1CollectedHeap::rebuild_region_lists() {
-  // This needs to go at the end of the full GC.
-  RegionResetter rs;
-  heap_region_iterate(&rs);
-  rs.update_free_lists();
-}
+class RebuildRegionSetsClosure : public HeapRegionClosure {
+private:
+  bool _free_list_only;
+  OldRegionSet* _old_set;
+  FreeRegionList* _free_list;
+  size_t _total_used;
+
+public:
+  RebuildRegionSetsClosure(bool free_list_only,
+                           OldRegionSet* old_set, FreeRegionList* free_list) :
+    _free_list_only(free_list_only),
+    _old_set(old_set), _free_list(free_list), _total_used(0) {
+    assert(_free_list->is_empty(), "pre-condition");
+    if (!free_list_only) {
+      assert(_old_set->is_empty(), "pre-condition");
+    }
+  }
+
+  bool doHeapRegion(HeapRegion* r) {
+    if (r->continuesHumongous()) {
+      return false;
+    }
+
+    if (r->is_empty()) {
+      // Add free regions to the free list
+      _free_list->add_as_tail(r);
+    } else if (!_free_list_only) {
+      assert(!r->is_young(), "we should not come across young regions");
+
+      if (r->isHumongous()) {
+        // We ignore humongous regions, we left the humongous set unchanged
+      } else {
+        // The rest should be old, add them to the old set
+        _old_set->add(r);
+      }
+      _total_used += r->used();
+    }
+
+    return false;
+  }
+
+  size_t total_used() {
+    return _total_used;
+  }
+};
+
+void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  heap_region_iterate(&cl);
+
+  if (!free_list_only) {
+    _summary_bytes_used = cl.total_used();
+  }
+  assert(_summary_bytes_used == recalculate_used(),
+         err_msg("inconsistent _summary_bytes_used, "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
+                 _summary_bytes_used, recalculate_used()));
+}
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
   _refine_cte_cl->set_concurrent(concurrent);
 }
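Both new closures follow the HeapRegionClosure convention used throughout this file: heap_region_iterate() hands every region to doHeapRegion(), and both closures return false unconditionally so the walk always covers the whole heap; presumably a true return would stop the iteration early. A minimal standalone model of that visitor shape, using RebuildRegionSetsClosure's used-bytes accumulation as the example; all types are illustrative:

#include <cstddef>
#include <vector>

struct Region { bool empty; size_t used; };

class RegionClosure {
public:
  virtual bool doHeapRegion(Region* r) = 0;
  virtual ~RegionClosure() { }
};

class TotalUsedClosure : public RegionClosure {
public:
  size_t total;
  TotalUsedClosure() : total(0) { }

  bool doHeapRegion(Region* r) {
    if (!r->empty) {
      total += r->used;   // like RebuildRegionSetsClosure's _total_used
    }
    return false;         // false == keep iterating over the heap
  }
};

void heap_region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (size_t i = 0; i < heap.size(); ++i) {
    if (cl->doHeapRegion(&heap[i])) {
      break;              // a true return would end the walk early
    }
  }
}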
@@ -5880,10 +5949,12 @@
                                              GCAllocPurpose ap) {
   alloc_region->note_end_of_copying();
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (ap == GCAllocForSurvived) {
     young_list()->add_survivor_region(alloc_region);
+  } else {
+    _old_set.add(alloc_region);
   }
   _hr_printer.retire(alloc_region);
 }
 
 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
@@ -5911,19 +5982,21 @@
 }
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
+  FreeRegionList* _free_list;
+  OldRegionSet* _old_set;
   HumongousRegionSet* _humongous_set;
-  FreeRegionList* _free_list;
   size_t _region_count;
 
 public:
-  VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
+  VerifyRegionListsClosure(OldRegionSet* old_set,
+                           HumongousRegionSet* humongous_set,
                            FreeRegionList* free_list) :
-    _humongous_set(humongous_set), _free_list(free_list),
-    _region_count(0) { }
+    _old_set(old_set), _humongous_set(humongous_set),
+    _free_list(free_list), _region_count(0) { }
 
   size_t region_count() { return _region_count; }
 
   bool doHeapRegion(HeapRegion* hr) {
     _region_count += 1;
@@ -5936,10 +6009,12 @@
       // TODO
     } else if (hr->startsHumongous()) {
       _humongous_set->verify_next_region(hr);
     } else if (hr->is_empty()) {
       _free_list->verify_next_region(hr);
+    } else {
+      _old_set->verify_next_region(hr);
     }
     return false;
   }
 };
 
@@ -5962,10 +6037,11 @@
     // the secondary free list we have to take the lock before
     // verifying it.
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
     _secondary_free_list.verify();
   }
+  _old_set.verify();
   _humongous_set.verify();
 
   // If a concurrent region freeing operation is in progress it will
   // be difficult to correctly attributed any free regions we come
   // across to the correct free list given that they might belong to
@@ -5985,14 +6061,16 @@
   // attributed to the free_list.
   append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
+  _old_set.verify_start();
   _humongous_set.verify_start();
   _free_list.verify_start();
 
-  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
   heap_region_iterate(&cl);
 
+  _old_set.verify_end();
   _humongous_set.verify_end();
   _free_list.verify_end();
 }
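With retired old GC alloc regions, regions kept after the collection set is freed, and full-GC survivors all funneled into _old_set, the verification walk can now classify every region: humongous, empty, or otherwise old. The verify_start() / verify_next_region() / verify_end() calls bracket one heap iteration per set; a plausible reading is that the start call arms the check, each next call asserts membership, and the end call confirms nothing in the set was missed. A standalone sketch under that assumption, with illustrative types only (the real sets likely verify ordering and lengths as well):

#include <cassert>
#include <cstddef>
#include <set>

struct Region { };

class RegionSetVerifier {
  const std::set<Region*>* _set;
  size_t _seen;

public:
  RegionSetVerifier() : _set(NULL), _seen(0) { }

  void verify_start(const std::set<Region*>* s) {
    _set = s;
    _seen = 0;
  }

  void verify_next_region(Region* r) {
    // Every region the heap walk attributes to this set must be a member.
    assert(_set->count(r) == 1 && "region not in the set it was attributed to");
    _seen++;
  }

  void verify_end() {
    // The walk must have accounted for the set's entire contents.
    assert(_seen == _set->size() && "heap walk missed some set members");
  }
};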
