src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

author       ysr
date         Thu, 05 Jun 2008 15:57:56 -0700
changeset    777:37f87013dfd8
child        828:078b8a0d8d7c
permissions  -rw-r--r--

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr

/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegionSeq.cpp.incl"

// Local to this file.

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq() :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap. The first
  // argument, however, is actually a comma expression (new-expr, 100).
  // The purpose of the new_expr is to inform the growable array that it
  // is *already* allocated on the C heap: it uses the placement syntax to
  // keep it from actually doing any allocation.
  _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
                                       (void*)&_regions,
                                       ResourceObj::C_HEAP),
            100),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
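
// A minimal standalone sketch of the comma-expression trick above, in plain
// standard C++ (Elem and Seq are illustrative names, not HotSpot types). The
// first argument of the member initializer is "(placement-new-expr, 100)":
// the placement expression is evaluated only for its effect and its value is
// discarded, after which the comma expression yields 100, which is what the
// element's constructor actually receives. In the constructor above, that
// first operand is ResourceObj::operator new(size, &_regions, C_HEAP), whose
// whole purpose is to tag the embedded GrowableArray as already allocated on
// the C heap without allocating anything.
#if 0  // illustrative sketch only, not used by this file
#include <new>   // declares the non-allocating placement form of operator new

struct Elem {
  int _initial;
  explicit Elem(int initial) : _initial(initial) {}
};

struct Seq {
  Elem _elem;
  // Equivalent in spirit to "_elem(100)", except that the placement new runs
  // first; it allocates nothing and simply returns &_elem, which is discarded.
  Seq() : _elem((::operator new (sizeof(Elem), (void*)&_elem), 100)) {}
};
#endif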

// Private methods.

HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //    && sumSizes is the sum of the sizes of the regions in the interval
    //       [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && !curhr->is_reserved()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;
    // Mark the allocated regions as allocated.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    HeapRegion* first_hr = _regions.at(first);
    for (int i = first; i < cur; i++) {
      HeapRegion* hr = _regions.at(i);
      if (zf)
        hr->ensure_zero_filled();
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      size_t sz = hr->capacity() / HeapWordSize;
      HeapWord* tmp = hr->allocate(sz);
      assert(tmp != NULL, "Humongous allocation failure");
      MemRegion mr = MemRegion(tmp, sz);
      SharedHeap::fill_region_with_object(mr);
      hr->declare_filled_region_to_BOT(mr);
      if (i == first) {
        first_hr->set_startsHumongous();
      } else {
        assert(i > first, "sanity");
        hr->set_continuesHumongous(first_hr);
      }
    }
    HeapWord* first_hr_bot = first_hr->bottom();
    HeapWord* obj_end = first_hr_bot + word_size;
    first_hr->set_top(obj_end);
    return first_hr_bot;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}
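
// The search loop above, reduced to its essentials with illustrative types
// (SimpleRegion and find_contiguous_empty_run are not HotSpot names): grow a
// candidate run [first, cur) of empty, unreserved, address-contiguous regions,
// restarting just past any region that does not qualify, and succeed once the
// accumulated capacity covers the request.
#if 0  // illustrative sketch only, not used by this file
#include <cstddef>
#include <vector>

struct SimpleRegion {
  bool   empty;                  // no live objects and not reserved
  bool   contiguous_with_prev;   // bottom() equals the predecessor's end()
  size_t capacity_words;
};

// Returns the index of the first region of a qualifying run, or -1 if no run
// starting at or after 'start' can cover 'word_size'.
static int find_contiguous_empty_run(const std::vector<SimpleRegion>& regions,
                                     int start, size_t word_size) {
  int    first    = start;
  size_t sumSizes = 0;
  for (int cur = start;
       cur < (int)regions.size() && sumSizes < word_size;
       cur++) {
    const SimpleRegion& r = regions[cur];
    if (r.empty && (cur == first || r.contiguous_with_prev)) {
      sumSizes += r.capacity_words;   // extend the run
    } else {
      first    = cur + 1;             // restart just past the bad region
      sumSizes = 0;
    }
  }
  return (sumSizes >= word_size) ? first : -1;
}
#endif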

void HeapRegionSeq::print_empty_runs(bool reserved_are_empty) {
  int empty_run = 0;
  int n_empty = 0;
  bool at_least_one_reserved = false;
  int empty_run_start;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty() && (reserved_are_empty || !r->is_reserved())) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
      if (r->is_reserved()) {
        at_least_one_reserved = true;
      }
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
        if (reserved_are_empty && at_least_one_reserved)
          gclog_or_tty->print("(R)");
        empty_run = 0;
        at_least_one_reserved = false;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
    if (reserved_are_empty && at_least_one_reserved) gclog_or_tty->print("(R)");
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
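
// For illustration (hypothetical indices): if regions 3-5 are empty with
// region 4 reserved, region 9 is the only other empty region, and
// reserved_are_empty is true, each run is printed as "start:length" with
// "(R)" marking runs that contain a reserved region, giving:
//
//    3:3(R) 9:1 [tot = 4]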

int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

void HeapRegionSeq::insert(HeapRegion* hr) {
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && !_regions.at(cur)->is_reserved()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}
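
// For illustration: with six address-contiguous regions whose states are
// [used, empty, used, empty, empty, empty] (none reserved), free_suffix()
// returns 3 -- the scan from the top stops at the used region at index 2.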

HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence. In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
    if (j < 0) {
      j = 0;
    }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
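
// A minimal (illustrative) use of the wrap-around iteration above: apply a
// closure to every region exactly once, starting at an arbitrary region and
// wrapping past the end of the sequence. CountFreeClosure and count_free_from
// are hypothetical names for this sketch, not HotSpot APIs.
#if 0  // illustrative sketch only, not used by this file
class CountFreeClosure : public HeapRegionClosure {
  size_t _free;
public:
  CountFreeClosure() : _free(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty()) _free++;
    return false;       // keep going: visit the entire sequence
  }
  size_t free_regions() { return _free; }
};

static size_t count_free_from(HeapRegionSeq* seq, HeapRegion* start) {
  CountFreeClosure cl;
  seq->iterate_from(start, &cl);   // start may be NULL to begin at index 0
  return cl.free_regions();
}
#endif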

void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    cur->reset_zero_fill();
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
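
// For illustration: if each region (HeapRegion::GrainBytes) is 1M and
// shrink_bytes is 3M, up to three empty, non-humongous regions are popped off
// the top of the sequence, and the returned MemRegion [last_start, end)
// covers exactly the popped regions. If a humongous or non-empty region is
// reached first, the shrink stops there and the MemRegion covers only what
// was popped so far.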


class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}
