/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"

// Local to this file.

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // lets the assert in GenericGrowableArray() pass; it checks that a
  // growable array object must be on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it
  // is a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}

// Private methods.
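
// Attempts to satisfy a humongous allocation of word_size words by
// finding a run of contiguous, empty regions whose combined capacity
// is at least word_size, starting the search at index ind. If such a
// run is found, the regions are set up as a single humongous object
// (a "starts humongous" region followed by zero or more "continues
// humongous" regions) and the address of the new object is returned;
// otherwise NULL is returned.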
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //    && sumSizes is the sum of the sizes of the regions in the
    //       interval [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;

    // We need to initialize the region(s) we just discovered. This is
    // a bit tricky given that it can happen concurrently with
    // refinement threads refining cards on these regions and
    // potentially wanting to refine the BOT as they are scanning
    // those cards (this can happen shortly after a cleanup; see CR
    // 6991377). So we have to set up the region(s) carefully and in
    // a specific order.

    // Currently, allocs_are_zero_filled() returns false. The zero
    // filling infrastructure will be going away soon (see CR 6977804).
    // So no need to do anything else here.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    assert(!zf, "not supported");

    // This will be the "starts humongous" region.
    HeapRegion* first_hr = _regions.at(first);
    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      first_hr->set_zero_fill_allocated();
    }
    // The header of the new object will be placed at the bottom of
    // the first region.
    HeapWord* new_obj = first_hr->bottom();
    // This will be the new end of the first region in the series that
    // should also match the end of the last region in the series.
    // (Note: sumSizes = "region size" x "number of regions we found").
    HeapWord* new_end = new_obj + sumSizes;
    // This will be the new top of the first region that will reflect
    // this allocation.
    HeapWord* new_top = new_obj + word_size;

    // First, we need to zero the header of the space that we will be
    // allocating. When we update top further down, some refinement
    // threads might try to scan the region. By zeroing the header we
    // ensure that any thread that will try to scan the region will
    // come across the zero klass word and bail out.
    //
    // NOTE: It would not have been correct to have used
    // CollectedHeap::fill_with_object() and make the space look like
    // an int array.
    // The thread that is doing the allocation will later update the
    // object header to a potentially different array type and, for a
    // very short period of time, the klass and length fields will be
    // inconsistent. This could cause a refinement thread to calculate
    // the object size incorrectly.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_startsHumongous(new_end);

    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      hr->set_continuesHumongous(first_hr);
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end.
    assert(hr == NULL || hr->end() == new_end, "sanity");

    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
    // [bottom,top] and [card_start,card_end] will be empty. Before we
    // update the top fields, we'll do a storestore to make sure that
    // no thread sees the update to top before the zeroing of the
    // object header and the BOT initialization.
    OrderAccess::storestore();

    // Now that the BOT and the object header have been initialized,
    // we can update the top of the "starts humongous" region.
    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
           "new_top should be in this region");
    first_hr->set_top(new_top);

    // Now, we will update the top fields of the "continues humongous"
    // regions. The reason we need to do this is that, otherwise,
    // these regions would look empty and this would confuse parts of
    // G1. For example, the code that looks for a consecutive number
    // of empty regions would consider them empty and try to
    // re-allocate them. We could extend is_empty() to also include
    // !continuesHumongous(), but it is easier to just update the top
    // fields here.
    hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      if ((i + 1) == cur) {
        // last continues humongous region
        assert(hr->bottom() < new_top && new_top <= hr->end(),
               "new_top should fall on this region");
        hr->set_top(new_top);
      } else {
        // not last one
        assert(new_top > hr->end(), "new_top should be above this region");
        hr->set_top(hr->end());
      }
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end and its top should
    // match new_top.
    assert(hr == NULL ||
           (hr->end() == new_end && hr->top() == new_top), "sanity");

    return new_obj;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}

void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}

int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}

HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.
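// The iteration wraps around: it walks from the starting index to the
// end of the sequence and then from index 0 up to the starting index,
// stopping early (and calling blk->incomplete()) as soon as the
// closure returns true.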
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here:
    // if we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}


class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}