1813 ref_processor()->clean_up_discovered_references(); |
1813 ref_processor()->clean_up_discovered_references(); |
1814 |
1814 |
1815 do_compaction_work(clear_all_soft_refs); |
1815 do_compaction_work(clear_all_soft_refs); |
1816 |
1816 |
1817 // Has the GC time limit been exceeded? |
1817 // Has the GC time limit been exceeded? |
1818 check_gc_time_limit(); |
1818 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); |
1819 |
1819 size_t max_eden_size = young_gen->max_capacity() - |
|
1820 young_gen->to()->capacity() - |
|
1821 young_gen->from()->capacity(); |
|
1822 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
|
1823 GCCause::Cause gc_cause = gch->gc_cause(); |
|
1824 size_policy()->check_gc_overhead_limit(_young_gen->used(), |
|
1825 young_gen->eden()->used(), |
|
1826 _cmsGen->max_capacity(), |
|
1827 max_eden_size, |
|
1828 full, |
|
1829 gc_cause, |
|
1830 gch->collector_policy()); |
1820 } else { |
1831 } else { |
1821 do_mark_sweep_work(clear_all_soft_refs, first_state, |
1832 do_mark_sweep_work(clear_all_soft_refs, first_state, |
1822 should_start_over); |
1833 should_start_over); |
1823 } |
1834 } |
1824 // Reset the expansion cause, now that we just completed |
1835 // Reset the expansion cause, now that we just completed |
1825 // a collection cycle. |
1836 // a collection cycle. |
1826 clear_expansion_cause(); |
1837 clear_expansion_cause(); |
1827 _foregroundGCIsActive = false; |
1838 _foregroundGCIsActive = false; |
1828 return; |
1839 return; |
1829 } |
|
1830 |
|
// NOTE(review): this span is a side-by-side diff render — the leading "18xx"
// numbers and trailing '|' separators are part of the captured text, not code.
// The body below is the old-side text of CMSCollector::check_gc_time_limit(),
// reproduced byte-for-byte; only this comment header has been added.
//
// Purpose (as visible in the code below): after a stop-the-world compacting
// collection, decide whether the GC overhead limit has been exceeded.
//  - Explicit GCs (user-requested or serviceability-requested causes) are
//    ignored entirely: the function returns without touching the counter.
//  - Otherwise, if the compacting GC cost exceeds GCTimeLimit percent AND the
//    free fraction of the CMS generation is below GCHeapFreeLimit percent,
//    size_policy()'s gc_time_limit_count is incremented; once that count
//    exceeds AdaptiveSizePolicyGCTimeLimitThreshold (and UseGCOverheadLimit
//    is enabled), the policy's gc_time_limit_exceeded flag is set and the
//    count is reset to avoid back-to-back OOMs from the same condition.
//  - If the cost/free-space test fails, the count is reset.
// PrintGCDetails logging distinguishes "is exceeding" (flag set) from
// "would exceed" (threshold not yet reached).
1831 void CMSCollector::check_gc_time_limit() { |
|
1832 |
|
1833 // Ignore explicit GC's. Exiting here does not set the flag and |
|
1834 // does not reset the count. Updating of the averages for system |
|
1835 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC. |
|
1836 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause(); |
|
1837 if (GCCause::is_user_requested_gc(gc_cause) || |
|
1838 GCCause::is_serviceability_requested_gc(gc_cause)) { |
|
1839 return; |
|
1840 } |
|
1841 |
|
1842 // Calculate the fraction of the CMS generation was freed during |
|
1843 // the last collection. |
|
1844 // Only consider the STW compacting cost for now. |
|
1845 // |
|
1846 // Note that the gc time limit test only works for the collections |
|
1847 // of the young gen + tenured gen and not for collections of the |
|
1848 // permanent gen. That is because the calculation of the space |
|
1849 // freed by the collection is the free space in the young gen + |
|
1850 // tenured gen. |
|
1851 |
|
1852 double fraction_free = |
|
1853 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity()); |
|
1854 if ((100.0 * size_policy()->compacting_gc_cost()) > |
|
1855 ((double) GCTimeLimit) && |
|
1856 ((fraction_free * 100) < GCHeapFreeLimit)) { |
|
1857 size_policy()->inc_gc_time_limit_count(); |
|
1858 if (UseGCOverheadLimit && |
|
1859 (size_policy()->gc_time_limit_count() > |
|
1860 AdaptiveSizePolicyGCTimeLimitThreshold)) { |
|
1861 size_policy()->set_gc_time_limit_exceeded(true); |
|
1862 // Avoid consecutive OOM due to the gc time limit by resetting |
|
1863 // the counter. |
|
1864 size_policy()->reset_gc_time_limit_count(); |
|
1865 if (PrintGCDetails) { |
|
1866 gclog_or_tty->print_cr(" GC is exceeding overhead limit " |
|
1867 "of %d%%", GCTimeLimit); |
|
1868 } |
|
1869 } else { |
|
1870 if (PrintGCDetails) { |
|
1871 gclog_or_tty->print_cr(" GC would exceed overhead limit " |
|
1872 "of %d%%", GCTimeLimit); |
|
1873 } |
|
1874 } |
|
1875 } else { |
|
1876 size_policy()->reset_gc_time_limit_count(); |
|
1877 } |
|
1878 } |
1840 } |
1879 |
1841 |
1880 // Resize the perm generation and the tenured generation |
1842 // Resize the perm generation and the tenured generation |
1881 // after obtaining the free list locks for the |
1843 // after obtaining the free list locks for the |
1882 // two generations. |
1844 // two generations. |
6180 bitMapLock()->lock_without_safepoint_check(); |
6142 bitMapLock()->lock_without_safepoint_check(); |
6181 startTimer(); |
6143 startTimer(); |
6182 } |
6144 } |
6183 curAddr = chunk.end(); |
6145 curAddr = chunk.end(); |
6184 } |
6146 } |
|
6147 // A successful mostly concurrent collection has been done. |
|
6148 // Because only the full (i.e., concurrent mode failure) collections |
|
6149 // are being measured for gc overhead limits, clean the "near" flag |
|
6150 // and count. |
|
6151 sp->reset_gc_overhead_limit_count(); |
6185 _collectorState = Idling; |
6152 _collectorState = Idling; |
6186 } else { |
6153 } else { |
6187 // already have the lock |
6154 // already have the lock |
6188 assert(_collectorState == Resetting, "just checking"); |
6155 assert(_collectorState == Resetting, "just checking"); |
6189 assert_lock_strong(bitMapLock()); |
6156 assert_lock_strong(bitMapLock()); |