src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

author      johnc
date        Mon, 02 Aug 2010 12:51:43 -0700
changeset   2060:2d160770d2e5
parent      1907:c18cbe5936b8
child       2043:2dfd013a7465
permissions -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
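A minimal sketch of the idea (illustrative only; the class and method names below are simplified stand-ins, not the actual HotSpot DirtyCardQueue API): instead of each worker accumulating the interesting references in a private _new_refs array and re-walking that array on an evacuation failure, each worker records the card spanning each such reference, and recreating the RSets amounts to replaying the recorded cards.

  #include <vector>

  typedef unsigned char CardValue;   // a card-table entry (stand-in)

  // Hypothetical per-worker recorder, sketching the scheme described
  // in the summary above.
  class IntoCSetCardRecorder {
    std::vector<CardValue*> _cards;  // cards holding refs into the cset
  public:
    // During RSet updating: remember the card, not the reference itself.
    void enqueue(CardValue* card_ptr) { _cards.push_back(card_ptr); }

    // On evacuation failure: replay every recorded card through a
    // closure that rebuilds the RSets of collection-set regions.
    template <typename CardClosure>
    void replay(CardClosure& cl) {
      for (size_t i = 0; i < _cards.size(); ++i) {
        cl.do_card(_cards[i]);
      }
    }
  };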

/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegionSeq.cpp.incl"

// Local to this file.

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}
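// For example, if *hr1p covers [0x1000, 0x2000) and *hr2p covers
// [0x2000, 0x3000), the regions are disjoint and the result is -1;
// with the arguments swapped it is 1. Equal pointers yield 0, and
// distinct overlapping regions are asserted against.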

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap. The first
  // argument, however, is actually a comma expression (new-expr, 100).
  // The purpose of the new-expr is to inform the growable array that it
  // is *already* allocated on the C heap: it uses the placement syntax to
  // keep it from actually doing any allocation.
  _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
                                       (void*)&_regions,
                                       ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
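// A hedged illustration of what the initializer above boils down to
// (expanded by hand, not compiled code):
//
//   // Placement form: returns (void*)&_regions without allocating,
//   // since the array object already lives inside this HeapRegionSeq.
//   ResourceObj::operator new(sizeof(GrowableArray<HeapRegion*>),
//                             (void*)&_regions,
//                             ResourceObj::C_HEAP);
//
// The comma expression then yields (int)max_size as the initial
// capacity, and the trailing 'true' puts the element storage (not the
// array object itself) on the C heap.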

// Private methods.

HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //    && sumSizes is the sum of the sizes of the regions in the
    //       interval [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;
    // Mark the allocated regions as allocated.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    HeapRegion* first_hr = _regions.at(first);
    for (int i = first; i < cur; i++) {
      HeapRegion* hr = _regions.at(i);
      if (zf)
        hr->ensure_zero_filled();
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      size_t sz = hr->capacity() / HeapWordSize;
      HeapWord* tmp = hr->allocate(sz);
      assert(tmp != NULL, "Humongous allocation failure");
      MemRegion mr = MemRegion(tmp, sz);
      CollectedHeap::fill_with_object(mr);
      hr->declare_filled_region_to_BOT(mr);
      if (i == first) {
        first_hr->set_startsHumongous();
      } else {
        assert(i > first, "sanity");
        hr->set_continuesHumongous(first_hr);
      }
    }
    HeapWord* first_hr_bot = first_hr->bottom();
    HeapWord* obj_end = first_hr_bot + word_size;
    first_hr->set_top(obj_end);
    return first_hr_bot;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}
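// Worked example (hypothetical sizes): with 1M-word regions, a request
// for word_size = 2.5M words needs a run of three contiguous empty
// regions (sumSizes reaches 3M >= 2.5M). The first region of the run is
// marked startsHumongous, the next two continuesHumongous, and the
// first region's top is set to bottom() + 2.5M words, i.e. the end of
// the object rather than the end of the region.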

void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
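// Example output (illustrative indices): " 3:5 17:2 [tot = 7]" means a
// run of 5 empty regions starting at index 3, a run of 2 starting at
// index 17, and 7 empty regions overall.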

int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}
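// In the common case, regions arrive in address order and are appended
// in O(1); only an out-of-order insert pays for a full sort plus
// re-indexing of every region.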

size_t HeapRegionSeq::length() {
  return _regions.length();
}

size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}
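// For example, if the last three regions in the sequence are empty and
// mutually contiguous but the region before them is not empty,
// free_suffix() returns 3.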

HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}
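// The search is two-pass: if no suitable run is found at or after
// _alloc_search_start, retry once from index 0 before giving up.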

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence. In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
    if (j < 0) {
      j = 0;
    }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
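// Iteration wraps around: regions [j, len) are visited first, then
// [0, j), so every region is visited exactly once wherever the
// iteration starts.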

void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
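// The returned MemRegion spans from the bottom of the last region
// removed to the end of the original top region; an empty MemRegion
// means nothing was removed. The walk stops early at the first
// humongous or non-empty region, so the caller may get back less than
// shrink_bytes.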


class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}
