src/share/vm/gc_implementation/g1/heapRegion.cpp

Mon, 02 Aug 2010 12:51:43 -0700

author
johnc
date
Mon, 02 Aug 2010 12:51:43 -0700
changeset 2060
2d160770d2e5
parent 2021
5cbac8938c4c
child 2073
bb847e31b836
permissions
-rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_heapRegion.cpp.incl"
ysr@777 27
// Region-sizing globals. All are zero until setup_heap_region_size()
// computes them exactly once (each setter there guarantees the value is
// still zero before writing it).
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes = 0;
int HeapRegion::GrainWords = 0;
int HeapRegion::CardsPerRegion = 0;
tonyp@1377 33
// Dirty-card-to-oop closure for a single region. The base class gets a
// NULL boundary; filtering (per "fk") is applied in walk_mem_region_with_cl.
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}
ysr@777 41
// Captures the region's [bottom, end) bounds so the closure can cheaply
// test whether a reference points outside region "r".
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}
ysr@777 47
// Heap-verification closure. For each reference field of a live object it
// checks that (1) the referent is inside the heap and live, and (2) for a
// cross-region reference (outside a full collection) that the target
// region's remembered set records it, unless the omission can be excused
// (young source region, or a still-dirty covering card). Failures are
// printed and counted rather than asserted immediately.
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;   // card table; used to excuse missing RS entries
  oop _containing_obj;      // object whose fields are currently being scanned
  bool _failures;           // true once any failure has been reported
  int _n_failures;          // number of failing fields/objects seen
  bool _use_prev_marking;
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  // Must be called before iterating a new object's fields.
  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }

  // Prints a compact description of obj: class name only in product
  // builds, the full object otherwise.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      // Check 1: referent must be in the heap and not dead.
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      // Check 2: remembered-set entry for a cross-region reference.
      // Skipped during a full collection (remembered sets are rebuilt then).
      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // A missing RS entry is excused when the source region is young,
          // or when log buffers were not flushed before verification and
          // the relevant card is still dirty (for object arrays only the
          // field's own card counts; otherwise the object-head card too).
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            // Count a field failing both checks only once.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
ysr@777 180
ysr@777 181 template<class ClosureType>
ysr@777 182 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
ysr@777 183 HeapRegion* hr,
ysr@777 184 HeapWord* cur, HeapWord* top) {
ysr@777 185 oop cur_oop = oop(cur);
ysr@777 186 int oop_size = cur_oop->size();
ysr@777 187 HeapWord* next_obj = cur + oop_size;
ysr@777 188 while (next_obj < top) {
ysr@777 189 // Keep filtering the remembered set.
ysr@777 190 if (!g1h->is_obj_dead(cur_oop, hr)) {
ysr@777 191 // Bottom lies entirely below top, so we can call the
ysr@777 192 // non-memRegion version of oop_iterate below.
ysr@777 193 cur_oop->oop_iterate(cl);
ysr@777 194 }
ysr@777 195 cur = next_obj;
ysr@777 196 cur_oop = oop(cur);
ysr@777 197 oop_size = cur_oop->size();
ysr@777 198 next_obj = cur + oop_size;
ysr@777 199 }
ysr@777 200 return cur;
ysr@777 201 }
ysr@777 202
// Applies the closure to all live objects in [bottom, top), bounding the
// first and last objects (which may extend outside "mr") with the
// memRegion form of oop_iterate. Interior objects go through
// walk_mem_region_loop with the filter selected by _fk.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  // Wrap the caller's closure in the filter chosen at construction time.
  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind: cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // NOTE(review): each case constructs its filter locally, presumably so
    // the template loop is instantiated with the concrete closure type —
    // confirm against the oop_iterate specializations.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
ysr@777 257
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048

// Picks the region size — a power of two clamped to
// [MIN_REGION_SIZE, MAX_REGION_SIZE] — either from the G1HeapRegionSize
// flag or automatically from the min heap size, and initializes all the
// static sizing globals. The guarantees enforce this runs exactly once.
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (int) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
tonyp@1377 324
// Post-compaction cleanup for this region: resets the space, discards
// stale liveness counts, and resets the top-at-mark-start pointers.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
ysr@777 332
// Factory for a dirty-card-to-oop closure over this region, with the
// requested precision and filtering kind.
DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}
ysr@777 340
// Resets this region to a pristine, non-humongous, not-in-collection-set
// state. When "par" is true the remembered set and claim value are left
// for the caller to reset later (sequentially). When "clear_space" is
// true the underlying space is also cleared (with mangling).
void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
ysr@777 371
// <PREDICTION>
// GC efficiency = garbage reclaimed per unit of predicted collection
// time; used to rank regions for collection-set selection.
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>
ysr@777 379
// Marks this region as the first region of a humongous object sequence;
// it is its own humongous-start region.
void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}
ysr@777 385
ysr@777 386 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@777 387 jint current = _claimed;
ysr@777 388 if (current != claimValue) {
ysr@777 389 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@777 390 if (res == current) {
ysr@777 391 return true;
ysr@777 392 }
ysr@777 393 }
ysr@777 394 return false;
ysr@777 395 }
ysr@777 396
// Binary-searches for the first block start at or after "addr", probing
// the block-offset table with block_start_careful (which never expands
// unparseable areas). Converges low and high onto that block start.
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
ysr@777 419
// Links "r" after this region on the unclean list; "r" must already be
// flagged as being on the unclean list (or be NULL to terminate).
void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}
ysr@777 424
// Flags whether this region is currently on the unclean (to-be-zero-filled)
// list.
void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}
ysr@777 428
// Initializes the underlying space (never clearing there; hr_clear decides
// whether to clear) and then resets all region state.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
ysr@777 433 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 434 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 435 #endif // _MSC_VER
ysr@777 436
ysr@777 437
// Constructs a region over "mr". Most state starts in the cleared/
// not-in-any-set configuration; the remembered set is allocated here.
// "is_zeroed" indicates the memory is already zero-filled, so initialize()
// is told to clear only when it is not.
HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the original end so it can be restored after a humongous
  // sequence temporarily extends it.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}
ysr@777 471
ysr@777 472 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
ysr@777 473 const HeapRegion* _target;
ysr@777 474 bool _target_seen;
ysr@777 475 HeapRegion* _last;
ysr@777 476 CompactibleSpace* _res;
ysr@777 477 public:
ysr@777 478 NextCompactionHeapRegionClosure(const HeapRegion* target) :
ysr@777 479 _target(target), _target_seen(false), _res(NULL) {}
ysr@777 480 bool doHeapRegion(HeapRegion* cur) {
ysr@777 481 if (_target_seen) {
ysr@777 482 if (!cur->isHumongous()) {
ysr@777 483 _res = cur;
ysr@777 484 return true;
ysr@777 485 }
ysr@777 486 } else if (cur == _target) {
ysr@777 487 _target_seen = true;
ysr@777 488 }
ysr@777 489 return false;
ysr@777 490 }
ysr@777 491 CompactibleSpace* result() { return _res; }
ysr@777 492 };
ysr@777 493
ysr@777 494 CompactibleSpace* HeapRegion::next_compaction_space() const {
ysr@777 495 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 496 // cast away const-ness
ysr@777 497 HeapRegion* r = (HeapRegion*) this;
ysr@777 498 NextCompactionHeapRegionClosure blk(r);
ysr@777 499 g1h->heap_region_iterate_from(r, &blk);
ysr@777 500 return blk.result();
ysr@777 501 }
ysr@777 502
// Marks this region as a continuation of the humongous object starting in
// "start", and registers it with that start region.
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}
ysr@777 509
// Extends this humongous-start region over the continuation region "cont":
// joins the block-offset-table entries, grows the array object at bottom()
// to cover the added capacity, and moves end/top out to cont's end.
void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}
ysr@777 519
// Records the current top as the saved mark, delimiting objects allocated
// before this point from those allocated after.
void HeapRegion::save_marks() {
  set_saved_mark();
}
ysr@777 523
ysr@777 524 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@777 525 HeapWord* p = mr.start();
ysr@777 526 HeapWord* e = mr.end();
ysr@777 527 oop obj;
ysr@777 528 while (p < e) {
ysr@777 529 obj = oop(p);
ysr@777 530 p += obj->oop_iterate(cl);
ysr@777 531 }
ysr@777 532 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@777 533 }
ysr@777 534
// Generates the oop_since_save_marks_iterate##nv_suffix family of methods
// (one per specialized closure type), each simply delegating to the
// ContiguousSpace implementation.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@777 540
ysr@777 541
// Iterates the reference fields of all objects allocated before the saved
// mark, i.e. in [bottom, saved_mark_word).
void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
ysr@777 545
#ifdef DEBUG
// Debug-only allocation wrapper: verifies that, when allocations are
// required to be zero-filled, this region has reached the Allocated
// zero-fill state before memory is handed out, then delegates to the
// space's allocator.
// Fix: removed the unused local "jint state = zero_fill_state();" —
// its value was never read.
HeapWord* HeapRegion::allocate(size_t size) {
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif
ysr@777 555
// Sets the zero-fill state. Requires the ZF lock (or a full GC). A
// non-empty region may only be moved to the Allocated state.
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  // The outer "if" re-tests the assert's condition so the (expensive)
  // region description string is only built when the assert will fire.
  if (top() != bottom() && zfs != Allocated) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}
ysr@777 572
// Transitions this region to ZeroFilled and, when the ZF lock is held,
// wakes any threads waiting for zero-filling to finish.
void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}
ysr@777 579
ysr@777 580
// Lock-acquiring wrapper around ensure_zero_filled_locked().
void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}
ysr@777 585
// Brings this region to the ZeroFilled state. Requires ZF_mon; may drop
// and re-take it while doing the actual fill. At a safepoint the calling
// thread may take over an in-progress fill instead of waiting.
void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      // Drop the lock while filling; other threads see ZeroFilling.
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // Deliberate fall-through: after waiting the region is ZeroFilled.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
ysr@777 625
// Applies "cl" to every live object overlapping "mr", stopping (and
// returning the address) at the first unparseable point (an object whose
// klass word is not yet set) or when the closure aborts. Returns NULL if
// the whole intersection with the used region was processed.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  // NOTE(review): "cur" is computed from mr.start() BEFORE mr is
  // intersected with the used region below — presumably callers always
  // pass mr.start() within the used region; confirm.
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
ysr@777 657
// Scans the objects overlapping "mr" (typically one card's span), applying
// "cl" to their reference fields. Returns the address of the first
// unparseable point encountered, or NULL if the scan completed. When
// "filter_young" is set, young regions are skipped entirely (their
// references are handled by virtue of being in the collection set).
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  // Advance past objects that end at or before mr.start(): stop at the
  // first object extending onto mr.start().
  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  // The first object straddles mr.start(): bound the field iteration to mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
ysr@777 741
ysr@777 742 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@777 743 void HeapRegion::print_on(outputStream* st) const {
ysr@777 744 if (isHumongous()) {
ysr@777 745 if (startsHumongous())
ysr@777 746 st->print(" HS");
ysr@777 747 else
ysr@777 748 st->print(" HC");
ysr@777 749 } else {
ysr@777 750 st->print(" ");
ysr@777 751 }
ysr@777 752 if (in_collection_set())
ysr@777 753 st->print(" CS");
ysr@777 754 else if (is_gc_alloc_region())
ysr@777 755 st->print(" A ");
ysr@777 756 else
ysr@777 757 st->print(" ");
ysr@777 758 if (is_young())
johnc@1829 759 st->print(is_survivor() ? " SU" : " Y ");
ysr@777 760 else
ysr@777 761 st->print(" ");
ysr@777 762 if (is_empty())
ysr@777 763 st->print(" F");
ysr@777 764 else
ysr@777 765 st->print(" ");
tonyp@1455 766 st->print(" %5d", _gc_time_stamp);
tonyp@1823 767 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
tonyp@1823 768 prev_top_at_mark_start(), next_top_at_mark_start());
ysr@777 769 G1OffsetTableContigSpace::print_on(st);
ysr@777 770 }
ysr@777 771
tonyp@1246 772 void HeapRegion::verify(bool allow_dirty) const {
tonyp@1455 773 bool dummy = false;
tonyp@1455 774 verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
tonyp@1246 775 }
tonyp@1246 776
ysr@777 777 #define OBJ_SAMPLE_INTERVAL 0
ysr@777 778 #define BLOCK_SAMPLE_INTERVAL 100
ysr@777 779
ysr@777 780 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@777 781 // We would need a mechanism to make that code skip dead objects.
ysr@777 782
tonyp@1455 783 void HeapRegion::verify(bool allow_dirty,
tonyp@1455 784 bool use_prev_marking,
tonyp@1455 785 bool* failures) const {
ysr@777 786 G1CollectedHeap* g1 = G1CollectedHeap::heap();
tonyp@1455 787 *failures = false;
ysr@777 788 HeapWord* p = bottom();
ysr@777 789 HeapWord* prev_p = NULL;
ysr@777 790 int objs = 0;
ysr@777 791 int blocks = 0;
tonyp@1246 792 VerifyLiveClosure vl_cl(g1, use_prev_marking);
ysr@777 793 while (p < top()) {
ysr@777 794 size_t size = oop(p)->size();
ysr@777 795 if (blocks == BLOCK_SAMPLE_INTERVAL) {
tonyp@1455 796 HeapWord* res = block_start_const(p + (size/2));
tonyp@1455 797 if (p != res) {
tonyp@1455 798 gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
tonyp@1455 799 SIZE_FORMAT" returned "PTR_FORMAT,
tonyp@1455 800 p, size, res);
tonyp@1455 801 *failures = true;
tonyp@1455 802 return;
tonyp@1455 803 }
ysr@777 804 blocks = 0;
ysr@777 805 } else {
ysr@777 806 blocks++;
ysr@777 807 }
ysr@777 808 if (objs == OBJ_SAMPLE_INTERVAL) {
ysr@777 809 oop obj = oop(p);
tonyp@1246 810 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
tonyp@1455 811 if (obj->is_oop()) {
tonyp@1455 812 klassOop klass = obj->klass();
tonyp@1455 813 if (!klass->is_perm()) {
tonyp@1455 814 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1455 815 "not in perm", klass, obj);
tonyp@1455 816 *failures = true;
tonyp@1455 817 return;
tonyp@1455 818 } else if (!klass->is_klass()) {
tonyp@1455 819 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
tonyp@1455 820 "not a klass", klass, obj);
tonyp@1455 821 *failures = true;
tonyp@1455 822 return;
tonyp@1455 823 } else {
tonyp@1455 824 vl_cl.set_containing_obj(obj);
tonyp@1455 825 obj->oop_iterate(&vl_cl);
tonyp@1455 826 if (vl_cl.failures()) {
tonyp@1455 827 *failures = true;
tonyp@1455 828 }
tonyp@1455 829 if (G1MaxVerifyFailures >= 0 &&
tonyp@1455 830 vl_cl.n_failures() >= G1MaxVerifyFailures) {
tonyp@1455 831 return;
tonyp@1455 832 }
tonyp@1455 833 }
tonyp@1455 834 } else {
tonyp@1455 835 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
tonyp@1455 836 *failures = true;
tonyp@1455 837 return;
tonyp@1455 838 }
ysr@777 839 }
ysr@777 840 objs = 0;
ysr@777 841 } else {
ysr@777 842 objs++;
ysr@777 843 }
ysr@777 844 prev_p = p;
ysr@777 845 p += size;
ysr@777 846 }
ysr@777 847 HeapWord* rend = end();
ysr@777 848 HeapWord* rtop = top();
ysr@777 849 if (rtop < rend) {
tonyp@1455 850 HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
tonyp@1455 851 if (res != rtop) {
tonyp@1455 852 gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
tonyp@1455 853 PTR_FORMAT" returned "PTR_FORMAT,
tonyp@1455 854 rtop, rend, res);
tonyp@1455 855 *failures = true;
tonyp@1455 856 return;
tonyp@1455 857 }
ysr@777 858 }
tonyp@1455 859
tonyp@1455 860 if (p != top()) {
tonyp@1455 861 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
tonyp@1455 862 "does not match top "PTR_FORMAT, p, top());
tonyp@1455 863 *failures = true;
tonyp@1455 864 return;
ysr@777 865 }
ysr@777 866 }
ysr@777 867
ysr@777 868 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@777 869 // away eventually.
ysr@777 870
// One-time set-up of the space over "mr": initialize the underlying
// ContiguousSpace, reset the block offset table (BOT) to match the new
// bounds, then optionally clear (and mangle) the space.
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}
ysr@777 878
// Reset the space to empty and re-initialize the block offset table so it
// is consistent with an empty space.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
ysr@777 884
// Keep the block offset table's notion of "bottom" in sync with the
// space's whenever bottom moves.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
ysr@777 889
// Resize the block offset table to cover [bottom(), new_end) whenever the
// space's end moves.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
ysr@777 894
ysr@777 895 void G1OffsetTableContigSpace::print() const {
ysr@777 896 print_short();
ysr@777 897 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@777 898 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@777 899 bottom(), top(), _offsets.threshold(), end());
ysr@777 900 }
ysr@777 901
// Delegate to the block offset table: reset and return the first address
// at which the BOT will need updating as allocation proceeds.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
ysr@777 905
// Called when an allocation [start, end) crosses the current BOT
// threshold: record the block in the offset table and return the next
// threshold.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
ysr@777 911
ysr@777 912 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@777 913 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 914 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@777 915 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@777 916 return top();
ysr@777 917 else
ysr@777 918 return ContiguousSpace::saved_mark_word();
ysr@777 919 }
ysr@777 920
// Record the current top() as this space's saved mark and tag it with the
// heap's current GC time stamp. saved_mark_word() uses the stamp to decide
// whether the saved mark is current for this GC or stale. The store order
// below (mark first, then stamp, separated by a storestore barrier) is
// what makes concurrent readers see a consistent pair -- do not reorder.
void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}
ysr@777 943
// Construct the space over memory region "mr", backed by the given shared
// block offset array. "is_zeroed" indicates the underlying memory is
// already zeroed, in which case initialize() is told not to clear it.
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  // Point our slice of the shared BOT back at this space before any
  // initialization that might touch it.
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}
ysr@777 954
ysr@777 955 size_t RegionList::length() {
ysr@777 956 size_t len = 0;
ysr@777 957 HeapRegion* cur = hd();
ysr@777 958 DEBUG_ONLY(HeapRegion* last = NULL);
ysr@777 959 while (cur != NULL) {
ysr@777 960 len++;
ysr@777 961 DEBUG_ONLY(last = cur);
ysr@777 962 cur = get_next(cur);
ysr@777 963 }
ysr@777 964 assert(last == tl(), "Invariant");
ysr@777 965 return len;
ysr@777 966 }
ysr@777 967
ysr@777 968 void RegionList::insert_before_head(HeapRegion* r) {
ysr@777 969 assert(well_formed(), "Inv");
ysr@777 970 set_next(r, hd());
ysr@777 971 _hd = r;
ysr@777 972 _sz++;
ysr@777 973 if (tl() == NULL) _tl = r;
ysr@777 974 assert(well_formed(), "Inv");
ysr@777 975 }
ysr@777 976
ysr@777 977 void RegionList::prepend_list(RegionList* new_list) {
ysr@777 978 assert(well_formed(), "Precondition");
ysr@777 979 assert(new_list->well_formed(), "Precondition");
ysr@777 980 HeapRegion* new_tl = new_list->tl();
ysr@777 981 if (new_tl != NULL) {
ysr@777 982 set_next(new_tl, hd());
ysr@777 983 _hd = new_list->hd();
ysr@777 984 _sz += new_list->sz();
ysr@777 985 if (tl() == NULL) _tl = new_list->tl();
ysr@777 986 } else {
ysr@777 987 assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
ysr@777 988 }
ysr@777 989 assert(well_formed(), "Inv");
ysr@777 990 }
ysr@777 991
ysr@777 992 void RegionList::delete_after(HeapRegion* r) {
ysr@777 993 assert(well_formed(), "Precondition");
ysr@777 994 HeapRegion* next = get_next(r);
ysr@777 995 assert(r != NULL, "Precondition");
ysr@777 996 HeapRegion* next_tl = get_next(next);
ysr@777 997 set_next(r, next_tl);
ysr@777 998 dec_sz();
ysr@777 999 if (next == tl()) {
ysr@777 1000 assert(next_tl == NULL, "Inv");
ysr@777 1001 _tl = r;
ysr@777 1002 }
ysr@777 1003 assert(well_formed(), "Inv");
ysr@777 1004 }
ysr@777 1005
ysr@777 1006 HeapRegion* RegionList::pop() {
ysr@777 1007 assert(well_formed(), "Inv");
ysr@777 1008 HeapRegion* res = hd();
ysr@777 1009 if (res != NULL) {
ysr@777 1010 _hd = get_next(res);
ysr@777 1011 _sz--;
ysr@777 1012 set_next(res, NULL);
ysr@777 1013 if (sz() == 0) _tl = NULL;
ysr@777 1014 }
ysr@777 1015 assert(well_formed(), "Inv");
ysr@777 1016 return res;
ysr@777 1017 }

mercurial