/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"

CSetChooserCache::CSetChooserCache() {
  for (int i = 0; i < CacheLength; ++i) {
    _cache[i] = NULL;
  }
  clear();
}

void CSetChooserCache::clear() {
  _occupancy = 0;
  _first = 0;
  for (int i = 0; i < CacheLength; ++i) {
    HeapRegion *hr = _cache[i];
    if (hr != NULL) {
      hr->set_sort_index(-1);
    }
    _cache[i] = NULL;
  }
}

#ifndef PRODUCT
bool CSetChooserCache::verify() {
  guarantee(false, "CSetChooserCache::verify(): don't call this any more");

  int index = _first;
  HeapRegion *prev = NULL;
  for (int i = 0; i < _occupancy; ++i) {
    guarantee(_cache[index] != NULL, "cache entry should not be empty");
    HeapRegion *hr = _cache[index];
    guarantee(!hr->is_young(), "should not be young!");
    if (prev != NULL) {
      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
                "cache should be correctly ordered");
    }
    guarantee(hr->sort_index() == get_sort_index(index),
              "sort index should be correct");
    index = trim_index(index + 1);
    prev = hr;
  }

  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
    guarantee(_cache[index] == NULL, "cache entry should be empty");
    index = trim_index(index + 1);
  }

  guarantee(index == _first, "we should have reached where we started from");
  return true;
}
#endif // PRODUCT

void CSetChooserCache::insert(HeapRegion *hr) {
  guarantee(false, "CSetChooserCache::insert(): don't call this any more");

  assert(!is_full(), "cache should not be full");
  hr->calc_gc_efficiency();

  int empty_index;
  if (_occupancy == 0) {
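    // Cache is empty, so the slot at _first is guaranteed to be free.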
    empty_index = _first;
  } else {
    empty_index = trim_index(_first + _occupancy);
    assert(_cache[empty_index] == NULL, "last slot should be empty");
    int last_index = trim_index(empty_index - 1);
    HeapRegion *last = _cache[last_index];
    assert(last != NULL, "as the cache is not empty, last should not be empty");
    while (empty_index != _first &&
           last->gc_efficiency() < hr->gc_efficiency()) {
      _cache[empty_index] = last;
      last->set_sort_index(get_sort_index(empty_index));
      empty_index = last_index;
      last_index = trim_index(last_index - 1);
      last = _cache[last_index];
    }
  }
  _cache[empty_index] = hr;
  hr->set_sort_index(get_sort_index(empty_index));

  ++_occupancy;
  assert(verify(), "cache should be consistent");
}

HeapRegion *CSetChooserCache::remove_first() {
  guarantee(false, "CSetChooserCache::remove_first(): "
                   "don't call this any more");

  if (_occupancy > 0) {
    assert(_cache[_first] != NULL, "cache should have at least one region");
    HeapRegion *ret = _cache[_first];
    _cache[_first] = NULL;
    ret->set_sort_index(-1);
    --_occupancy;
    _first = trim_index(_first + 1);
    assert(verify(), "cache should be consistent");
    return ret;
  } else {
    return NULL;
  }
}

// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// causes regions with both a lot of live objects and large RSets to
// end up at the end of the array. Given that we might skip collecting
// the last few old regions if, after a few mixed GCs, the remaining
// ones have reclaimable bytes under a certain threshold, the hope is
// that the regions we skip will be those with both large RSets and a
// lot of live objects, rather than merely those with a lot of live
// objects, as would happen if we ordered by the amount of reclaimable
// bytes per region.
static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return orderRegions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100).
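  // (A comma expression evaluates its operands left to right and
  // yields the value of the last one, so the whole expression above
  // evaluates to 100, the initial capacity passed to the
  // GrowableArray constructor.)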
  // The purpose of the set_allocation_type() call is to replace the
  // default allocation type for embedded objects, STACK_OR_EMBEDDED,
  // with C_HEAP. This allows the assert in GenericGrowableArray() to
  // pass; it checks that a growable array object must be on the C heap
  // if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is
  // a CHeapObj.
  //
  _markedRegions((ResourceObj::set_allocation_type((address) &_markedRegions,
                                                   ResourceObj::C_HEAP),
                  100), true /* C_Heap */),
    _curr_index(0), _length(0),
    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
    _first_par_unreserved_idx(0) {
  _regionLiveThresholdBytes =
    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
}

#ifndef PRODUCT
bool CollectionSetChooser::verify() {
  guarantee(_length >= 0, err_msg("_length: %d", _length));
  guarantee(0 <= _curr_index && _curr_index <= _length,
            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
  int index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _curr_index) {
    guarantee(_markedRegions.at(index) == NULL,
              "all entries before _curr_index should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _length) {
    HeapRegion *curr = _markedRegions.at(index++);
    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
    int si = curr->sort_index();
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->isHumongous(), "should not be humongous!");
    guarantee(si > -1 && si == (index - 1), "sort index invariant");
    if (prev != NULL) {
      guarantee(orderRegions(prev, curr) != 1,
                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                        prev->gc_efficiency(), curr->gc_efficiency()));
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
            err_msg("reclaimable bytes inconsistent, "
                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
  return true;
}
#endif

void CollectionSetChooser::fillCache() {
  guarantee(false, "fillCache: don't call this any more");

  while (!_cache.is_full() && (_curr_index < _length)) {
    HeapRegion* hr = _markedRegions.at(_curr_index);
    assert(hr != NULL,
           err_msg("Unexpected NULL hr in _markedRegions at index %d",
                   _curr_index));
    _curr_index += 1;
    assert(!hr->is_young(), "should not be young!");
    assert(hr->sort_index() == _curr_index - 1, "sort_index invariant");
    _markedRegions.at_put(hr->sort_index(), NULL);
    _cache.insert(hr);
    assert(!_cache.is_empty(), "cache should not be empty");
  }
  assert(verify(), "cache should be consistent");
}

void CollectionSetChooser::sortMarkedHeapRegions() {
  // First trim any unused portion of the top in the parallel case.
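  // (Parallel workers claim fixed-size chunks of the array, so it may
  // have grown past the last chunk actually handed out; entries at and
  // above _first_par_unreserved_idx were never filled in.)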
  if (_first_par_unreserved_idx > 0) {
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
                          _markedRegions.length(), _first_par_unreserved_idx);
    }
    assert(_first_par_unreserved_idx <= _markedRegions.length(),
           "Or we didn't reserve enough length");
    _markedRegions.trunc_to(_first_par_unreserved_idx);
  }
  _markedRegions.sort(orderRegions);
  assert(_length <= _markedRegions.length(), "Requirement");
  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
         "Testing _length");
  assert(_length == _markedRegions.length() ||
         _markedRegions.at(_length) == NULL, "Testing _length");
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
  }
  for (int i = 0; i < _length; i++) {
    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
    _markedRegions.at(i)->set_sort_index(i);
  }
  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
    for (int i = 0; i < _length; ++i) {
      HeapRegion* r = _markedRegions.at(i);
      cl.doHeapRegion(r);
    }
  }
  assert(verify(), "CSet chooser verification");
}

size_t CollectionSetChooser::calcMinOldCSetLength() {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) _length;
  const size_t gc_num = (size_t) G1MaxMixedGCNum;
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return result;
}

size_t CollectionSetChooser::calcMaxOldCSetLength() {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->n_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return result;
}

void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
  assert(!hr->isHumongous(),
         "Humongous regions shouldn't be added to the collection set");
  assert(!hr->is_young(), "should not be young!");
  _markedRegions.append(hr);
  _length++;
  _remainingReclaimableBytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
                                                             size_t chunkSize) {
  _first_par_unreserved_idx = 0;
  int n_threads = ParallelGCThreads;
  if (UseDynamicNumberOfGCThreads) {
    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
           "Should have been set earlier");
    // This is defensive code. As the assertion above says, the number
    // of active threads should be > 0, but in case there is some path
    // or some improperly initialized variable which leads to no
    // active threads, protect against that in a product build.
    n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                     1U);
  }
  size_t max_waste = n_threads * chunkSize;
  // it should be aligned with respect to chunkSize
  size_t aligned_n_regions =
    (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
  assert(aligned_n_regions % chunkSize == 0, "should be aligned");
  _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
}

jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
  // Don't do this assert because this can be called at a point
  // where the loop upstream will not execute again but might
  // try to claim more chunks (loop test has not been done yet).
  // assert(_markedRegions.length() > _first_par_unreserved_idx,
  //        "Striding beyond the marked regions");
  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
  assert(_markedRegions.length() > res + n_regions - 1,
         "Should already have been expanded");
  return res - n_regions;
}

void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
  assert(_markedRegions.at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  _markedRegions.at_put(index, hr);
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::updateTotals(jint region_num,
                                        size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
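    // Taking the lock also lets _length and _remainingReclaimableBytes
    // be updated together, keeping the two totals mutually consistent.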
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _length += (int) region_num;
    _remainingReclaimableBytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

void CollectionSetChooser::clearMarkedHeapRegions() {
  for (int i = 0; i < _markedRegions.length(); i++) {
    HeapRegion* r = _markedRegions.at(i);
    if (r != NULL) {
      r->set_sort_index(-1);
    }
  }
  _markedRegions.clear();
  _curr_index = 0;
  _length = 0;
  _remainingReclaimableBytes = 0;
}