src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

Wed, 19 Jan 2011 19:30:42 -0500

author
tonyp
date
Wed, 19 Jan 2011 19:30:42 -0500
changeset 2472
0fa27f37d4d4
parent 2453
2250ee17e258
child 2492
a672e43650cc
permissions
-rw-r--r--

6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (before, we'd have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/heapRegionSeq.hpp"
stefank@2314 28 #include "memory/allocation.hpp"
ysr@777 29
ysr@777 30 // Local to this file.
ysr@777 31
ysr@777 32 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
ysr@777 33 if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
ysr@777 34 else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
ysr@777 35 else if (*hr1p == *hr2p) return 0;
ysr@777 36 else {
ysr@777 37 assert(false, "We should never compare distinct overlapping regions.");
ysr@777 38 }
ysr@777 39 return 0;
ysr@777 40 }
ysr@777 41
// Constructs a sequence that can hold up to max_size regions; the
// backing growable array is pre-sized to max_size and its element
// storage is C-heap allocated (see the long comment below for why
// the first constructor argument looks the way it does).
HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
ysr@777 65
ysr@777 66 // Private methods.
ysr@777 67
apetrusenko@1112 68 void HeapRegionSeq::print_empty_runs() {
ysr@777 69 int empty_run = 0;
ysr@777 70 int n_empty = 0;
ysr@777 71 int empty_run_start;
ysr@777 72 for (int i = 0; i < _regions.length(); i++) {
ysr@777 73 HeapRegion* r = _regions.at(i);
ysr@777 74 if (r->continuesHumongous()) continue;
apetrusenko@1112 75 if (r->is_empty()) {
ysr@777 76 assert(!r->isHumongous(), "H regions should not be empty.");
ysr@777 77 if (empty_run == 0) empty_run_start = i;
ysr@777 78 empty_run++;
ysr@777 79 n_empty++;
ysr@777 80 } else {
ysr@777 81 if (empty_run > 0) {
ysr@777 82 gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
ysr@777 83 empty_run = 0;
ysr@777 84 }
ysr@777 85 }
ysr@777 86 }
ysr@777 87 if (empty_run > 0) {
ysr@777 88 gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
ysr@777 89 }
ysr@777 90 gclog_or_tty->print_cr(" [tot = %d]", n_empty);
ysr@777 91 }
ysr@777 92
ysr@777 93 int HeapRegionSeq::find(HeapRegion* hr) {
ysr@777 94 // FIXME: optimized for adjacent regions of fixed size.
ysr@777 95 int ind = hr->hrs_index();
ysr@777 96 if (ind != -1) {
ysr@777 97 assert(_regions.at(ind) == hr, "Mismatch");
ysr@777 98 }
ysr@777 99 return ind;
ysr@777 100 }
ysr@777 101
ysr@777 102
ysr@777 103 // Public methods.
ysr@777 104
ysr@777 105 void HeapRegionSeq::insert(HeapRegion* hr) {
iveresov@828 106 assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
ysr@777 107 if (_regions.length() == 0
ysr@777 108 || _regions.top()->end() <= hr->bottom()) {
ysr@777 109 hr->set_hrs_index(_regions.length());
ysr@777 110 _regions.append(hr);
ysr@777 111 } else {
ysr@777 112 _regions.append(hr);
ysr@777 113 _regions.sort(orderRegions);
ysr@777 114 for (int i = 0; i < _regions.length(); i++) {
ysr@777 115 _regions.at(i)->set_hrs_index(i);
ysr@777 116 }
ysr@777 117 }
ysr@777 118 char* bot = (char*)_regions.at(0)->bottom();
ysr@777 119 if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
ysr@777 120 }
ysr@777 121
ysr@777 122 size_t HeapRegionSeq::length() {
ysr@777 123 return _regions.length();
ysr@777 124 }
ysr@777 125
ysr@777 126 size_t HeapRegionSeq::free_suffix() {
ysr@777 127 size_t res = 0;
ysr@777 128 int first = _regions.length() - 1;
ysr@777 129 int cur = first;
ysr@777 130 while (cur >= 0 &&
ysr@777 131 (_regions.at(cur)->is_empty()
ysr@777 132 && (first == cur
ysr@777 133 || (_regions.at(cur+1)->bottom() ==
ysr@777 134 _regions.at(cur)->end())))) {
ysr@777 135 res++;
ysr@777 136 cur--;
ysr@777 137 }
ysr@777 138 return res;
ysr@777 139 }
ysr@777 140
// Searches the region array, starting at index from, for num
// consecutive slots that all hold empty regions. Returns the index
// of the first slot of such a run, or -1 if no run of length num
// exists at or after from.
// NOTE(review): "contiguous" here means consecutive array slots;
// this presumably implies contiguous heap space because insert()
// keeps the array address-sorted -- confirm before relying on it.
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
  assert(num > 1, "pre-condition");
  assert(0 <= from && from <= _regions.length(),
         err_msg("from: %d should be valid and <= than %d",
                 from, _regions.length()));

  int curr = from;
  int first = -1;        // start index of the current run of empty regions
  size_t num_so_far = 0; // length of the current run
  while (curr < _regions.length() && num_so_far < num) {
    HeapRegion* curr_hr = _regions.at(curr);
    if (curr_hr->is_empty()) {
      if (first == -1) {
        // Starting a new run at curr.
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      // A non-empty region breaks the run; start over.
      first = -1;
      num_so_far = 0;
    }
    curr += 1;
  }

  assert(num_so_far <= num, "post-condition");
  if (num_so_far == num) {
    // we find enough space for the humongous object
    assert(from <= first && first < _regions.length(), "post-condition");
    assert(first < curr && (curr - first) == (int) num, "post-condition");
    for (int i = first; i < first + (int) num; ++i) {
      assert(_regions.at(i)->is_empty(), "post-condition");
    }
    return first;
  } else {
    // we failed to find enough space for the humongous object
    return -1;
  }
}
tonyp@2472 180
tonyp@2472 181 int HeapRegionSeq::find_contiguous(size_t num) {
tonyp@2472 182 assert(num > 1, "otherwise we should not be calling this");
tonyp@2472 183 assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
tonyp@2472 184 err_msg("_alloc_search_start: %d should be valid and <= than %d",
tonyp@2472 185 _alloc_search_start, _regions.length()));
tonyp@2472 186
tonyp@2472 187 int start = _alloc_search_start;
tonyp@2472 188 int res = find_contiguous_from(start, num);
tonyp@2472 189 if (res == -1 && start != 0) {
tonyp@2472 190 // Try starting from the beginning. If _alloc_search_start was 0,
tonyp@2472 191 // no point in doing this again.
tonyp@2472 192 res = find_contiguous_from(0, num);
tonyp@2472 193 }
tonyp@2472 194 if (res != -1) {
tonyp@2472 195 assert(0 <= res && res < _regions.length(),
tonyp@2472 196 err_msg("res: %d should be valid", res));
tonyp@2472 197 _alloc_search_start = res + (int) num;
tonyp@2472 198 }
tonyp@2472 199 assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
tonyp@2472 200 err_msg("_alloc_search_start: %d should be valid",
tonyp@2472 201 _alloc_search_start));
ysr@777 202 return res;
ysr@777 203 }
ysr@777 204
// Applies blk to every region in the sequence, starting from the
// beginning (delegates to the general form with no starting region).
void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}
ysr@777 208
ysr@777 209 // The first argument r is the heap region at which iteration begins.
ysr@777 210 // This operation runs fastest when r is NULL, or the heap region for
ysr@777 211 // which a HeapRegionClosure most recently returned true, or the
ysr@777 212 // heap region immediately to its right in the sequence. In all
ysr@777 213 // other cases a linear search is required to find the index of r.
ysr@777 214
// Applies blk to every region, starting at r's index (or 0 if r is
// NULL) and wrapping around, until the closure asks to stop. The
// once-planned index cache is disabled via the #if 0 blocks below.
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  // NOTE(review): as written, "j = find(r);" falls inside the #if 0
  // region below, so with caching disabled j stays 0 and iteration
  // always starts at the first region regardless of r -- verify the
  // #endif placement against the upstream file.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      // Regions not in the sequence map to index 0.
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  // First pass: from j to the end of the array.
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  // Second pass: wrap around and cover the prefix we skipped.
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
ysr@777 267
ysr@777 268 void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
ysr@777 269 int len = _regions.length();
ysr@777 270 int i;
ysr@777 271 for (i = idx; i < len; i++) {
ysr@777 272 if (blk->doHeapRegion(_regions.at(i))) {
ysr@777 273 blk->incomplete();
ysr@777 274 return;
ysr@777 275 }
ysr@777 276 }
ysr@777 277 for (i = 0; i < idx; i++) {
ysr@777 278 if (blk->doHeapRegion(_regions.at(i))) {
ysr@777 279 blk->incomplete();
ysr@777 280 return;
ysr@777 281 }
ysr@777 282 }
ysr@777 283 }
ysr@777 284
// Shrinks the sequence from the top by up to shrink_bytes, popping
// empty regions off the end. Returns the contiguous address range
// covered by the removed regions (empty if nothing was removed) and
// counts the removals in num_regions_deleted. Stops early at the
// first humongous or non-empty region.
// NOTE(review): num_regions_deleted is only assigned here in the
// empty-sequence case; callers presumably pre-initialize it to 0 --
// confirm at the call sites.
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  // Reset this in case it's currently pointing into the regions that
  // we just removed.
  _alloc_search_start = 0;

  // Both alignments are pre-conditions established by the caller.
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // NOTE(review): this changeset removed the zero-filling thread,
    // so the rationale above looks stale -- confirm whether the
    // HeapRegion objects can now safely be freed here.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
ysr@777 323
// Closure that prints each region's address and details to the GC
// log; always returns false so iteration visits every region.
class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};
ysr@777 332
ysr@777 333 void HeapRegionSeq::print() {
ysr@777 334 PrintHeapRegionClosure cl;
ysr@777 335 iterate(&cl);
ysr@777 336 }

mercurial