@@ -658,17 +659,18 @@
   // We will set up the first region as "starts humongous". This
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_startsHumongous(new_top, new_end);
-
+  first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
+    hr->set_allocation_context(context);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
   // end of the last one should match new_end.
   assert(hr == NULL || hr->end() == new_end, "sanity");
 
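Both additions tag the new humongous regions with the allocation context supplied by the caller. The context type itself is defined outside this file; a minimal sketch that is consistent with the AllocationContext::current() and AllocationContext::system() call sites appearing later in this patch (the real header may differ):

// Sketch only: the real definition ships in a separate header that is
// not part of this excerpt.
typedef unsigned char AllocationContext_t;

class AllocationContext : AllStatic {
public:
  // Context used for allocations performed on behalf of the VM itself.
  static AllocationContext_t system()  { return 0; }
  // Context associated with the thread doing the current allocation.
  static AllocationContext_t current() { return 0; }
};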
@@ -731,20 +733,20 @@
   assert(hr == NULL ||
          (hr->end() == new_end && hr->top() == new_top), "sanity");
   check_bitmaps("Humongous Region Allocation", first_hr);
 
   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  _summary_bytes_used += first_hr->used();
+  _allocator->increase_used(first_hr->used());
   _humongous_set.add(first_hr);
 
   return new_obj;
 }
 
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
   uint first = G1_NO_HRM_INDEX;
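The running used-bytes counter moves out of G1CollectedHeap: what used to be a direct `_summary_bytes_used` update becomes a call on the new allocator object. A sketch of the accounting surface this file now calls into (increase_used, set_used, used, and used_unlocked all appear in later hunks); the field name and class layout are assumptions:

class G1Allocator : public CHeapObj<mtGC> {
protected:
  G1CollectedHeap* _g1h;
  // Bytes used in all retired regions; the region currently being
  // allocated into is only accounted for by used(), see below.
  size_t _summary_bytes_used;
public:
  void   set_used(size_t bytes)      { _summary_bytes_used = bytes; }
  void   increase_used(size_t bytes) { _summary_bytes_used += bytes; }
  size_t used_unlocked() const       { return _summary_bytes_used; }
  // Must also count the in-flight mutator alloc region (sketched near
  // the used() hunk further down).
  virtual size_t used() = 0;
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
};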
@@ -808,11 +810,12 @@
     }
   }
 
   HeapWord* result = NULL;
   if (first != G1_NO_HRM_INDEX) {
-    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
     // information of the old generation so we need to recalculate the
     // sizes and update the jstat counters here.
@@ -887,12 +892,13 @@
   ShouldNotReachHere();
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
+                                                   AllocationContext_t context,
                                                    unsigned int *gc_count_before_ret,
                                                    int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
          "be called for humongous allocation requests");
@@ -909,27 +915,26 @@
   bool should_try_gc;
   unsigned int gc_count_before;
 
   {
     MutexLockerEx x(Heap_lock);
-
-    result = _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                             false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                  false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
 
     // If we reach here, attempt_allocation_locked() above failed to
     // allocate a new region. So the mutator alloc region should be NULL.
-    assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+    assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
 
     if (GC_locker::is_active_and_needs_gc()) {
       if (g1_policy()->can_expand_young_list()) {
         // No need for an ergo verbose message here,
         // can_expand_young_list() does this when it returns true.
-        result = _mutator_alloc_region.attempt_allocation_force(word_size,
-                                                                false /* bot_updates */);
+        result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+                                                                                     false /* bot_updates */);
         if (result != NULL) {
           return result;
         }
       }
       should_try_gc = false;
@@ -985,12 +990,12 @@
     // allocation attempt in case another thread successfully
     // performed a collection and reclaimed enough space. We do the
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
 
     // Give a warning if we seem to be looping forever.
@@ -1004,12 +1009,12 @@
   ShouldNotReachHere();
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         unsigned int * gc_count_before_ret,
                                                         int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
   // allocation is not humongous do this, otherwise do that"
   // conditional paths which would obscure its flow. In fact, an early
@@ -1122,21 +1127,22 @@
   ShouldNotReachHere();
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
+                                                           AllocationContext_t context,
                                                            bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_mutator_alloc_region.get() == NULL ||
+  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
          !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
 
   if (!isHumongous(word_size)) {
-    return _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                           false /* bot_updates */);
+    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                false /* bot_updates */);
   } else {
-    HeapWord* result = humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
       g1_policy()->set_initiate_conc_mark_if_possible();
     }
     return result;
   }
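Every former direct use of the `_mutator_alloc_region` field is now routed through `_allocator->mutator_alloc_region(context)`. For a single-context allocator the accessor can simply ignore its argument; a sketch, where the G1DefaultAllocator name and layout are assumptions:

class G1DefaultAllocator : public G1Allocator {
protected:
  // Only one mutator alloc region exists here; the context parameter
  // lets context-aware subclasses choose between several instead.
  MutatorAllocRegion _mutator_alloc_region;
public:
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }
};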
@@ -1332,12 +1338,12 @@
   // refinement, if any are in progress. We have to do this before
   // wait_until_scan_finished() below.
   concurrent_mark()->abort();
 
   // Make sure we'll choose a new allocation region afterwards.
-  release_mutator_alloc_region();
-  abandon_gc_alloc_regions();
+  _allocator->release_mutator_alloc_region();
+  _allocator->abandon_gc_alloc_regions();
   g1_rem_set()->cleanupHRRS();
 
   // We should call this after we retire any currently active alloc
   // regions so that all the ALLOC / RETIRE events are generated
   // before the start GC event.
@@ -1643,28 +1649,30 @@
 }
 
 
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+                                           AllocationContext_t context,
                                            bool* succeeded) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   *succeeded = true;
   // Let's attempt the allocation first.
   HeapWord* result =
     attempt_allocation_at_safepoint(word_size,
+                                    context,
                                     false /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
   }
 
   // In a G1 heap, we're supposed to keep allocation from failing by
   // incremental pauses. Therefore, at least for now, we'll favor
   // expansion over collection. (This might change in the future if we can
   // do something smarter than full collection to satisfy a failed alloc.)
-  result = expand_and_allocate(word_size);
+  result = expand_and_allocate(word_size, context);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
   }
 
@@ -1716,11 +1726,11 @@
 // Attempting to expand the heap sufficiently
 // to support an allocation of the given "word_size". If
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".
 
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
@@ -1808,11 +1819,11 @@
   verify_region_sets_optional();
 
   // We should only reach here at the end of a Full GC which means we
   // should not be holding on to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
-  abandon_gc_alloc_regions();
+  _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
   tear_down_region_sets(true /* free_list_only */);
@@ -1841,11 +1852,11 @@
   _ref_processor_stw(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
   _mark_in_progress(false),
-  _cg1r(NULL), _summary_bytes_used(0),
+  _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
@@ -1853,11 +1864,10 @@
   _humongous_is_live(),
   _has_humongous_reclaim_candidates(false),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _retained_old_gc_alloc_region(NULL),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
   _surviving_young_words(NULL),
   _old_marking_cycles_started(0),
@@ -2120,11 +2131,11 @@
   dummy_region->set_young();
   // Make sure it's full.
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
 
-  init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_region();
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
   _g1mm = new G1MonitoringSupport(this);
 
@@ -2294,25 +2305,16 @@
   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
 }
 
 
 // Computes the sum of the storage used by the various regions.
-
 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
-  size_t result = _summary_bytes_used;
-  // Read only once in case it is set to NULL concurrently
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr != NULL)
-    result += hr->used();
-  return result;
+  return _allocator->used();
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
-  size_t result = _summary_bytes_used;
-  return result;
+  return _allocator->used_unlocked();
 }
 
 class SumUsedClosure: public HeapRegionClosure {
   size_t _used;
 public:
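The body deleted from used() cannot simply vanish: the bytes sitting in the in-flight mutator region are still only visible through the alloc region. It plausibly reappears behind the allocator; a sketch reconstructed from the deleted lines (the class name and method placement are assumptions):

size_t G1DefaultAllocator::used() {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  // Read only once in case it is set to NULL concurrently.
  HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
  if (hr != NULL) {
    result += hr->used();
  }
  return result;
}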
@@ -2352,11 +2354,12 @@
   // And as a result the region we'll allocate will be humongous.
   guarantee(isHumongous(word_size), "sanity");
 
   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
     // Let's use the existing mechanism for the allocation
-    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+                                                 AllocationContext::system());
     if (dummy_obj != NULL) {
       MemRegion mr(dummy_obj, word_size);
       CollectedHeap::fill_with_object(mr);
     } else {
       // If we can't allocate once, we probably cannot allocate
@@ -2492,10 +2495,11 @@
       VM_G1IncCollectionPause op(gc_count_before,
                                  0,     /* word_size */
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+      op.set_allocation_context(AllocationContext::current());
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
         if (old_marking_count_before == _old_marking_cycles_started) {
           retry_gc = op.should_retry_gc();
@@ -3613,10 +3617,12 @@
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
                              gc_cause);
+
+  op.set_allocation_context(AllocationContext::current());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
   assert(result == NULL || ret_succeeded,
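Both pause operations record the requesting thread's context before being handed to the VM thread, so that an allocation retried inside the safepoint can land in the right context. A sketch of the accessors assumed on the shared base class of these VM ops (names and placement are assumptions):

// Assumed to live on the common base class of the G1 collection ops.
void set_allocation_context(AllocationContext_t context) {
  _allocation_context = context;
}
AllocationContext_t allocation_context() { return _allocation_context; }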
@@ -3942,11 +3948,11 @@
       // NoRefDiscovery object will do this.
       NoRefDiscovery no_cm_discovery(ref_processor_cm());
 
       // Forget the current alloc region (we might even choose it to be part
       // of the collection set!).
-      release_mutator_alloc_region();
+      _allocator->release_mutator_alloc_region();
 
       // We should call this after we retire the mutator alloc
       // region(s) so that all the ALLOC / RETIRE events are generated
       // before the start GC event.
       _hr_printer.start_gc(false /* full */, (size_t) total_collections());
@@ -4082,21 +4088,21 @@
                                           _young_list->last_survivor_region());
 
         _young_list->reset_auxilary_lists();
 
         if (evacuation_failed()) {
-          _summary_bytes_used = recalculate_used();
+          _allocator->set_used(recalculate_used());
           uint n_queues = MAX2((int)ParallelGCThreads, 1);
           for (uint i = 0; i < n_queues; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
             }
           }
         } else {
           // The "used" of the collection set regions has already been
           // subtracted when they were freed. Add in the bytes evacuated.
-          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
@@ -4114,11 +4120,11 @@
         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
         _young_list->print();
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_region();
 
         {
           size_t expand_bytes = g1_policy()->expansion_amount();
           if (expand_bytes > 0) {
             size_t bytes_before = capacity();
@@ -4259,84 +4265,10 @@
   // never be in a humongous region
   // * Allowing humongous PLABs needlessly churns the region free lists
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
-void G1CollectedHeap::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
-  HeapRegion* retained_region = _retained_old_gc_alloc_region;
-  _retained_old_gc_alloc_region = NULL;
-
-  // We will discard the current GC alloc region if:
-  // a) it's in the collection set (it can happen!),
-  // b) it's already full (no point in using it),
-  // c) it's empty (this means that it was emptied during
-  // a cleanup and it should be on the free list now), or
-  // d) it's humongous (this means that it was emptied
-  // during a cleanup and was added to the free list, but
-  // has been subsequently used to allocate a humongous
-  // object that may be less than the region size).
-  if (retained_region != NULL &&
-      !retained_region->in_collection_set() &&
-      !(retained_region->top() == retained_region->end()) &&
-      !retained_region->is_empty() &&
-      !retained_region->isHumongous()) {
-    retained_region->record_top_and_timestamp();
-    // The retained region was added to the old region set when it was
-    // retired. We have to remove it now, since we don't allow regions
-    // we allocate to in the region sets. We'll re-add it later, when
-    // it's retired again.
-    _old_set.remove(retained_region);
-    bool during_im = g1_policy()->during_initial_mark_pause();
-    retained_region->note_start_of_copying(during_im);
-    _old_gc_alloc_region.set(retained_region);
-    _hr_printer.reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
-  }
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
-
-  use_retained_old_gc_alloc_region(evacuation_info);
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
-                                         _old_gc_alloc_region.count());
-  _survivor_gc_alloc_region.release();
-  // If we have an old GC alloc region to release, we'll save it in
-  // _retained_old_gc_alloc_region. If we don't
-  // _retained_old_gc_alloc_region will become NULL. This is what we
-  // want either way so no reason to check explicitly for either
-  // condition.
-  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
-  if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-  }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
-  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
-  _retained_old_gc_alloc_region = NULL;
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
 }
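None of the code deleted above is dropped: mutator and GC alloc-region management moves wholesale onto the allocator, which now also owns `_retained_old_gc_alloc_region` (removed from the constructor's initializer list earlier in this patch). A sketch of two of the relocated methods in their assumed new home, reconstructed from the deleted lines:

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}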
@@ -4473,39 +4405,37 @@
     _preserved_marks_of_objs.push(m);
   }
 }
 
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size) {
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
   if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size);
+    HeapWord* result = survivor_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the old gen in case we can fit the
       // object there.
-      return old_attempt_allocation(word_size);
+      return old_attempt_allocation(word_size, context);
     }
   } else {
     assert(purpose == GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size);
+    HeapWord* result = old_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the survivors in case we can fit the
       // object there.
-      return survivor_attempt_allocation(word_size);
+      return survivor_attempt_allocation(word_size, context);
     }
   }
 
   ShouldNotReachHere();
   // Trying to keep some compilers happy.
   return NULL;
 }
 
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
-  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // We know that the object is not moving so it's safe to read its size.
@@ -5963,11 +5893,11 @@
     if (G1StringDedup::is_enabled()) {
       G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
     }
   }
 
-  release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
   // Note the counts for the cards in the regions in the
   // collection set are reset when the collection set is freed.
@@ -6740,16 +6668,16 @@
 
   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
-    _summary_bytes_used = cl.total_used();
+    _allocator->set_used(cl.total_used());
   }
-  assert(_summary_bytes_used == recalculate_used(),
-         err_msg("inconsistent _summary_bytes_used, "
-                 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
-                 _summary_bytes_used, recalculate_used()));
+  assert(_allocator->used_unlocked() == recalculate_used(),
+         err_msg("inconsistent _allocator->used_unlocked(), "
                 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
+                 _allocator->used_unlocked(), recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
   _refine_cte_cl->set_concurrent(concurrent);
 }
@@ -6785,21 +6713,16 @@
                                                        size_t allocated_bytes) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
-  _summary_bytes_used += allocated_bytes;
+  _allocator->increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
   // used space has been recorded in _summary_bytes_used.
   g1mm()->update_eden_size();
 }
-
-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
-                                                    bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
-}
 
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers. Use the value previously set
   // in the workgroup.
@@ -6812,15 +6735,10 @@
     assert(false, "Should have been set in prior evacuation pause.");
     n_workers = ParallelGCThreads;
     workers()->set_active_workers(n_workers);
   }
   set_par_threads(n_workers);
 }
-
-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
-                                       size_t allocated_bytes) {
-  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
 
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@@ -6868,62 +6786,10 @@
     _old_set.add(alloc_region);
   }
   _hr_printer.retire(alloc_region);
 }
 
-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
-                                                       bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                          size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
-                                                  bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                     size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
-}
-
-HeapRegion* OldGCAllocRegion::release() {
-  HeapRegion* cur = get();
-  if (cur != NULL) {
-    // Determine how far we are from the next card boundary. If it is smaller than
-    // the minimum object size we can allocate into, expand into the next card.
-    HeapWord* top = cur->top();
-    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
-
-    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
-
-    if (to_allocate_words != 0) {
-      // We are not at a card boundary. Fill up, possibly into the next, taking the
-      // end of the region and the minimum object size into account.
-      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
-                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
-
-      // Skip allocation if there is not enough space to allocate even the smallest
-      // possible object. In this case this region will not be retained, so the
-      // original problem cannot occur.
-      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
-        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
-        CollectedHeap::fill_with_object(dummy, to_allocate_words);
-      }
-    }
-  }
-  return G1AllocRegion::release();
-}
-
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet* _old_set;
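The relocated OldGCAllocRegion::release() pads the retained region's top out to a card boundary before retiring it, so a region retained across pauses never ends mid-card. A worked example of the arithmetic in the deleted body, assuming 8-byte HeapWords and 512-byte BOT cards (the typical values; both are assumptions here):

// top               = 0x7f30a1e8  (0x18 = 24 bytes short of the next card)
// aligned_top       = align_ptr_up(top, 512) = 0x7f30a200
// to_allocate_words = (0x7f30a200 - 0x7f30a1e8) / 8 = 3 words
// 3 >= min_fill_size() (typically 2 words), so a 3-word filler object is
// allocated and the region is retained with top exactly on a card boundary.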