src/share/vm/gc_implementation/g1/heapRegion.cpp

author:      johnc
date:        Mon, 03 Aug 2009 12:59:30 -0700
changeset:   1324:15c5903cf9e1
parent:      1301:18f526145aea
child:       1377:2c79770d1f6e
permissions: -rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Clear the hot card cache in parallel: have each GC worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks each thread claims is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
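
The chunk-claiming scheme the summary describes can be sketched as
follows. This is a hypothetical illustration, not the patch itself:
every identifier in it (HotCardCacheSketch, hot_cache, process_card,
and so on) is invented for the example, and standard C++ atomics stand
in for HotSpot's internal Atomic primitives.

#include <atomic>
#include <cstddef>

// Each GC worker thread repeatedly claims the next chunk of the shared
// card cache by atomically advancing a cursor, then processes the cards
// in its chunk. Disjoint [start, end) ranges fall out of the atomic add.
struct HotCardCacheSketch {
  unsigned char** hot_cache;   // shared array of cached card pointers
  int n_entries;               // total entries in the cache
  int chunk_size;              // entries per chunk, fixed at VM init from
                               // cache size and worker thread count
  std::atomic<int> cursor;     // index of the next unclaimed entry

  template <typename CardFn>
  void drain_in_parallel(CardFn process_card) {
    for (;;) {
      int start = cursor.fetch_add(chunk_size);
      if (start >= n_entries) return;        // cache fully claimed
      int end = start + chunk_size;
      if (end > n_entries) end = n_entries;  // clip the final chunk
      for (int i = start; i < end; i++) {
        if (hot_cache[i] != NULL) process_card(hot_cache[i]);
      }
    }
  }
};

A single atomically advanced cursor needs no per-chunk bookkeeping or
locking, and fixing the chunk size at initialization (from the cache
size and the worker count) bounds contention on the cursor while giving
each worker a roughly equal share of the cards.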

ysr@777 1 /*
xdono@1279 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
ysr@777 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777 21 * have any questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_heapRegion.cpp.incl"
ysr@777 27
ysr@777 28 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777 29 HeapRegion* hr, OopClosure* cl,
ysr@777 30 CardTableModRefBS::PrecisionStyle precision,
ysr@777 31 FilterKind fk) :
ysr@777 32 ContiguousSpaceDCTOC(hr, cl, precision, NULL),
ysr@777 33 _hr(hr), _fk(fk), _g1(g1)
ysr@777 34 {}
ysr@777 35
ysr@777 36 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
ysr@777 37 OopClosure* oc) :
ysr@777 38 _r_bottom(r->bottom()), _r_end(r->end()),
ysr@777 39 _oc(oc), _out_of_region(0)
ysr@777 40 {}
ysr@777 41
ysr@777 42 class VerifyLiveClosure: public OopClosure {
tonyp@1246 43 private:
ysr@777 44 G1CollectedHeap* _g1h;
ysr@777 45 CardTableModRefBS* _bs;
ysr@777 46 oop _containing_obj;
ysr@777 47 bool _failures;
ysr@777 48 int _n_failures;
tonyp@1246 49 bool _use_prev_marking;
ysr@777 50 public:
tonyp@1246 51 // use_prev_marking == true -> use "prev" marking information,
tonyp@1246 52 // use_prev_marking == false -> use "next" marking information
tonyp@1246 53 VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
ysr@777 54 _g1h(g1h), _bs(NULL), _containing_obj(NULL),
tonyp@1246 55 _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
ysr@777 56 {
ysr@777 57 BarrierSet* bs = _g1h->barrier_set();
ysr@777 58 if (bs->is_a(BarrierSet::CardTableModRef))
ysr@777 59 _bs = (CardTableModRefBS*)bs;
ysr@777 60 }
ysr@777 61
ysr@777 62 void set_containing_obj(oop obj) {
ysr@777 63 _containing_obj = obj;
ysr@777 64 }
ysr@777 65
ysr@777 66 bool failures() { return _failures; }
ysr@777 67 int n_failures() { return _n_failures; }
ysr@777 68
ysr@1280 69 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 70 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@777 71
ysr@1280 72 template <class T> void do_oop_work(T* p) {
ysr@777 73 assert(_containing_obj != NULL, "Precondition");
tonyp@1246 74 assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
tonyp@1246 75 "Precondition");
ysr@1280 76 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 77 if (!oopDesc::is_null(heap_oop)) {
ysr@1280 78 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@777 79 bool failed = false;
tonyp@1246 80 if (!_g1h->is_in_closed_subset(obj) ||
tonyp@1246 81 _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
ysr@777 82 if (!_failures) {
ysr@777 83 gclog_or_tty->print_cr("");
ysr@777 84 gclog_or_tty->print_cr("----------");
ysr@777 85 }
ysr@777 86 if (!_g1h->is_in_closed_subset(obj)) {
ysr@777 87 gclog_or_tty->print_cr("Field "PTR_FORMAT
ysr@777 88 " of live obj "PTR_FORMAT
ysr@777 89 " points to obj "PTR_FORMAT
ysr@777 90 " not in the heap.",
ysr@777 91 p, (void*) _containing_obj, (void*) obj);
ysr@777 92 } else {
ysr@777 93 gclog_or_tty->print_cr("Field "PTR_FORMAT
ysr@777 94 " of live obj "PTR_FORMAT
ysr@777 95 " points to dead obj "PTR_FORMAT".",
ysr@777 96 p, (void*) _containing_obj, (void*) obj);
ysr@777 97 }
ysr@777 98 gclog_or_tty->print_cr("Live obj:");
ysr@777 99 _containing_obj->print_on(gclog_or_tty);
ysr@777 100 gclog_or_tty->print_cr("Bad referent:");
ysr@777 101 obj->print_on(gclog_or_tty);
ysr@777 102 gclog_or_tty->print_cr("----------");
ysr@777 103 _failures = true;
ysr@777 104 failed = true;
ysr@777 105 _n_failures++;
ysr@777 106 }
ysr@777 107
ysr@777 108 if (!_g1h->full_collection()) {
ysr@1280 109 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
ysr@1280 110 HeapRegion* to = _g1h->heap_region_containing(obj);
ysr@777 111 if (from != NULL && to != NULL &&
ysr@777 112 from != to &&
ysr@777 113 !to->isHumongous()) {
ysr@777 114 jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
ysr@777 115 jbyte cv_field = *_bs->byte_for_const(p);
ysr@777 116 const jbyte dirty = CardTableModRefBS::dirty_card_val();
ysr@777 117
ysr@777 118 bool is_bad = !(from->is_young()
ysr@777 119 || to->rem_set()->contains_reference(p)
ysr@777 120 || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
ysr@777 121 (_containing_obj->is_objArray() ?
ysr@777 122 cv_field == dirty
ysr@777 123 : cv_obj == dirty || cv_field == dirty));
ysr@777 124 if (is_bad) {
ysr@777 125 if (!_failures) {
ysr@777 126 gclog_or_tty->print_cr("");
ysr@777 127 gclog_or_tty->print_cr("----------");
ysr@777 128 }
ysr@777 129 gclog_or_tty->print_cr("Missing rem set entry:");
ysr@777 130 gclog_or_tty->print_cr("Field "PTR_FORMAT
ysr@777 131 " of obj "PTR_FORMAT
ysr@777 132 ", in region %d ["PTR_FORMAT
ysr@777 133 ", "PTR_FORMAT"),",
ysr@777 134 p, (void*) _containing_obj,
ysr@777 135 from->hrs_index(),
ysr@777 136 from->bottom(),
ysr@777 137 from->end());
ysr@777 138 _containing_obj->print_on(gclog_or_tty);
ysr@777 139 gclog_or_tty->print_cr("points to obj "PTR_FORMAT
ysr@777 140 " in region %d ["PTR_FORMAT
ysr@777 141 ", "PTR_FORMAT").",
ysr@777 142 (void*) obj, to->hrs_index(),
ysr@777 143 to->bottom(), to->end());
ysr@777 144 obj->print_on(gclog_or_tty);
ysr@777 145 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
ysr@777 146 cv_obj, cv_field);
ysr@777 147 gclog_or_tty->print_cr("----------");
ysr@777 148 _failures = true;
ysr@777 149 if (!failed) _n_failures++;
ysr@777 150 }
ysr@777 151 }
ysr@777 152 }
ysr@777 153 }
ysr@777 154 }
ysr@777 155 };
ysr@777 156
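// Applies "cl" to every live object whose end lies strictly below "top",
// starting at "cur". Returns the start of the first object that extends
// to or beyond "top"; the caller filters and visits that last object.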
ysr@777 157 template<class ClosureType>
ysr@777 158 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
ysr@777 159 HeapRegion* hr,
ysr@777 160 HeapWord* cur, HeapWord* top) {
ysr@777 161 oop cur_oop = oop(cur);
ysr@777 162 int oop_size = cur_oop->size();
ysr@777 163 HeapWord* next_obj = cur + oop_size;
ysr@777 164 while (next_obj < top) {
ysr@777 165 // Keep filtering the remembered set.
ysr@777 166 if (!g1h->is_obj_dead(cur_oop, hr)) {
ysr@777 167 // Bottom lies entirely below top, so we can call the
ysr@777 168 // non-memRegion version of oop_iterate below.
ysr@777 169 cur_oop->oop_iterate(cl);
ysr@777 170 }
ysr@777 171 cur = next_obj;
ysr@777 172 cur_oop = oop(cur);
ysr@777 173 oop_size = cur_oop->size();
ysr@777 174 next_obj = cur + oop_size;
ysr@777 175 }
ysr@777 176 return cur;
ysr@777 177 }
ysr@777 178
ysr@777 179 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
ysr@777 180 HeapWord* bottom,
ysr@777 181 HeapWord* top,
ysr@777 182 OopClosure* cl) {
ysr@777 183 G1CollectedHeap* g1h = _g1;
ysr@777 184
ysr@777 185 int oop_size;
ysr@777 186
ysr@777 187 OopClosure* cl2 = cl;
ysr@777 188 FilterIntoCSClosure intoCSFilt(this, g1h, cl);
ysr@777 189 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
ysr@777 190 switch (_fk) {
ysr@777 191 case IntoCSFilterKind: cl2 = &intoCSFilt; break;
ysr@777 192 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
ysr@777 193 }
ysr@777 194
ysr@777 195 // Start filtering what we add to the remembered set. If the object is
ysr@777 196 // not considered dead, either because it is marked (in the mark bitmap)
ysr@777 197 // or it was allocated after marking finished, then we add it. Otherwise
ysr@777 198 // we can safely ignore the object.
ysr@777 199 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@777 200 oop_size = oop(bottom)->oop_iterate(cl2, mr);
ysr@777 201 } else {
ysr@777 202 oop_size = oop(bottom)->size();
ysr@777 203 }
ysr@777 204
ysr@777 205 bottom += oop_size;
ysr@777 206
ysr@777 207 if (bottom < top) {
ysr@777 208 // We replicate the loop below for several kinds of possible filters.
ysr@777 209 switch (_fk) {
ysr@777 210 case NoFilterKind:
ysr@777 211 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
ysr@777 212 break;
ysr@777 213 case IntoCSFilterKind: {
ysr@777 214 FilterIntoCSClosure filt(this, g1h, cl);
ysr@777 215 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@777 216 break;
ysr@777 217 }
ysr@777 218 case OutOfRegionFilterKind: {
ysr@777 219 FilterOutOfRegionClosure filt(_hr, cl);
ysr@777 220 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
ysr@777 221 break;
ysr@777 222 }
ysr@777 223 default:
ysr@777 224 ShouldNotReachHere();
ysr@777 225 }
ysr@777 226
ysr@777 227 // Last object. Need to do dead-obj filtering here too.
ysr@777 228 if (!g1h->is_obj_dead(oop(bottom), _hr)) {
ysr@777 229 oop(bottom)->oop_iterate(cl2, mr);
ysr@777 230 }
ysr@777 231 }
ysr@777 232 }
ysr@777 233
ysr@777 234 void HeapRegion::reset_after_compaction() {
ysr@777 235 G1OffsetTableContigSpace::reset_after_compaction();
ysr@777 236 // After a compaction the mark bitmap is invalid, so we must
ysr@777 237 // treat all objects as being inside the unmarked area.
ysr@777 238 zero_marked_bytes();
ysr@777 239 init_top_at_mark_start();
ysr@777 240 }
ysr@777 241
ysr@777 242 DirtyCardToOopClosure*
ysr@777 243 HeapRegion::new_dcto_closure(OopClosure* cl,
ysr@777 244 CardTableModRefBS::PrecisionStyle precision,
ysr@777 245 HeapRegionDCTOC::FilterKind fk) {
ysr@777 246 return new HeapRegionDCTOC(G1CollectedHeap::heap(),
ysr@777 247 this, cl, precision, fk);
ysr@777 248 }
ysr@777 249
ysr@777 250 void HeapRegion::hr_clear(bool par, bool clear_space) {
tonyp@790 251 _humongous_type = NotHumongous;
ysr@777 252 _humongous_start_region = NULL;
ysr@777 253 _in_collection_set = false;
ysr@777 254 _is_gc_alloc_region = false;
ysr@777 255
ysr@777 256 // Age stuff (if parallel, this will be done separately, since it needs
ysr@777 257 // to be sequential).
ysr@777 258 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 259
ysr@777 260 set_young_index_in_cset(-1);
ysr@777 261 uninstall_surv_rate_group();
ysr@777 262 set_young_type(NotYoung);
ysr@777 263
ysr@777 264 // In case it had been the start of a humongous sequence, reset its end.
ysr@777 265 set_end(_orig_end);
ysr@777 266
ysr@777 267 if (!par) {
ysr@777 268 // If this is parallel, this will be done later.
ysr@777 269 HeapRegionRemSet* hrrs = rem_set();
ysr@777 270 if (hrrs != NULL) hrrs->clear();
tonyp@790 271 _claimed = InitialClaimValue;
ysr@777 272 }
ysr@777 273 zero_marked_bytes();
ysr@777 274 set_sort_index(-1);
ysr@777 275
ysr@777 276 _offsets.resize(HeapRegion::GrainWords);
ysr@777 277 init_top_at_mark_start();
tonyp@791 278 if (clear_space) clear(SpaceDecorator::Mangle);
ysr@777 279 }
ysr@777 280
ysr@777 281 // <PREDICTION>
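// GC efficiency is reclaimable garbage bytes per millisecond of
// predicted (non-young) collection time, used to rank regions when
// choosing the collection set.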
ysr@777 282 void HeapRegion::calc_gc_efficiency() {
ysr@777 283 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 284 _gc_efficiency = (double) garbage_bytes() /
ysr@777 285 g1h->predict_region_elapsed_time_ms(this, false);
ysr@777 286 }
ysr@777 287 // </PREDICTION>
ysr@777 288
ysr@777 289 void HeapRegion::set_startsHumongous() {
tonyp@790 290 _humongous_type = StartsHumongous;
ysr@777 291 _humongous_start_region = this;
ysr@777 292 assert(end() == _orig_end, "Should be normal before alloc.");
ysr@777 293 }
ysr@777 294
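// Makes a single CAS attempt to claim this region with claimValue.
// Returns true only if this thread's CAS installed the value; a region
// already claimed (or a lost race) yields false, with no retry.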
ysr@777 295 bool HeapRegion::claimHeapRegion(jint claimValue) {
ysr@777 296 jint current = _claimed;
ysr@777 297 if (current != claimValue) {
ysr@777 298 jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
ysr@777 299 if (res == current) {
ysr@777 300 return true;
ysr@777 301 }
ysr@777 302 }
ysr@777 303 return false;
ysr@777 304 }
ysr@777 305
ysr@777 306 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
ysr@777 307 HeapWord* low = addr;
ysr@777 308 HeapWord* high = end();
ysr@777 309 while (low < high) {
ysr@777 310 size_t diff = pointer_delta(high, low);
ysr@777 311 // Must add one below to bias toward the high amount. Otherwise, if
ysr@777 312 // "high" were at the desired value, and "low" were one less, we
ysr@777 313 // would not converge on "high". This is not symmetric, because
ysr@777 314 // we set "high" to a block start, which might be the right one,
ysr@777 315 // which we don't do for "low".
ysr@777 316 HeapWord* middle = low + (diff+1)/2;
ysr@777 317 if (middle == high) return high;
ysr@777 318 HeapWord* mid_bs = block_start_careful(middle);
ysr@777 319 if (mid_bs < addr) {
ysr@777 320 low = middle;
ysr@777 321 } else {
ysr@777 322 high = mid_bs;
ysr@777 323 }
ysr@777 324 }
ysr@777 325 assert(low == high && low >= addr, "Didn't work.");
ysr@777 326 return low;
ysr@777 327 }
ysr@777 328
ysr@777 329 void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
ysr@777 330 assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
ysr@777 331 _next_in_special_set = r;
ysr@777 332 }
ysr@777 333
ysr@777 334 void HeapRegion::set_on_unclean_list(bool b) {
ysr@777 335 _is_on_unclean_list = b;
ysr@777 336 }
ysr@777 337
tonyp@791 338 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
tonyp@791 339 G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
ysr@777 340 hr_clear(false/*par*/, clear_space);
ysr@777 341 }
ysr@777 342 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 343 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 344 #endif // _MSC_VER
ysr@777 345
ysr@777 346
ysr@777 347 HeapRegion::
ysr@777 348 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 349 MemRegion mr, bool is_zeroed)
ysr@777 350 : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
ysr@777 351 _next_fk(HeapRegionDCTOC::NoFilterKind),
ysr@777 352 _hrs_index(-1),
tonyp@790 353 _humongous_type(NotHumongous), _humongous_start_region(NULL),
ysr@777 354 _in_collection_set(false), _is_gc_alloc_region(false),
ysr@777 355 _is_on_free_list(false), _is_on_unclean_list(false),
ysr@777 356 _next_in_special_set(NULL), _orig_end(NULL),
tonyp@790 357 _claimed(InitialClaimValue), _evacuation_failed(false),
ysr@777 358 _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
ysr@777 359 _young_type(NotYoung), _next_young_region(NULL),
apetrusenko@1231 360 _next_dirty_cards_region(NULL),
ysr@777 361 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
ysr@777 362 _rem_set(NULL), _zfs(NotZeroFilled)
ysr@777 363 {
ysr@777 364 _orig_end = mr.end();
ysr@777 365 // Note that initialize() will set the start of the unmarked area of the
ysr@777 366 // region.
tonyp@791 367 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
tonyp@791 368 set_top(bottom());
tonyp@791 369 set_saved_mark();
ysr@777 370
ysr@777 371 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
ysr@777 372
ysr@777 373 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
ysr@777 374 // In case the region is allocated during a pause, note the top.
ysr@777 375 // We haven't done any counting on a brand new region.
ysr@777 376 _top_at_conc_mark_count = bottom();
ysr@777 377 }
ysr@777 378
ysr@777 379 class NextCompactionHeapRegionClosure: public HeapRegionClosure {
ysr@777 380 const HeapRegion* _target;
ysr@777 381 bool _target_seen;
ysr@777 382 HeapRegion* _last;
ysr@777 383 CompactibleSpace* _res;
ysr@777 384 public:
ysr@777 385 NextCompactionHeapRegionClosure(const HeapRegion* target) :
ysr@777 386 _target(target), _target_seen(false), _res(NULL) {}
ysr@777 387 bool doHeapRegion(HeapRegion* cur) {
ysr@777 388 if (_target_seen) {
ysr@777 389 if (!cur->isHumongous()) {
ysr@777 390 _res = cur;
ysr@777 391 return true;
ysr@777 392 }
ysr@777 393 } else if (cur == _target) {
ysr@777 394 _target_seen = true;
ysr@777 395 }
ysr@777 396 return false;
ysr@777 397 }
ysr@777 398 CompactibleSpace* result() { return _res; }
ysr@777 399 };
ysr@777 400
ysr@777 401 CompactibleSpace* HeapRegion::next_compaction_space() const {
ysr@777 402 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 403 // cast away const-ness
ysr@777 404 HeapRegion* r = (HeapRegion*) this;
ysr@777 405 NextCompactionHeapRegionClosure blk(r);
ysr@777 406 g1h->heap_region_iterate_from(r, &blk);
ysr@777 407 return blk.result();
ysr@777 408 }
ysr@777 409
ysr@777 410 void HeapRegion::set_continuesHumongous(HeapRegion* start) {
ysr@777 411 // The order is important here.
ysr@777 412 start->add_continuingHumongousRegion(this);
tonyp@790 413 _humongous_type = ContinuesHumongous;
ysr@777 414 _humongous_start_region = start;
ysr@777 415 }
ysr@777 416
ysr@777 417 void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
ysr@777 418 // Must join the blocks of the current H region seq with the block of the
ysr@777 419 // added region.
ysr@777 420 offsets()->join_blocks(bottom(), cont->bottom());
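// The object at bottom() is laid out as an (int) array; grow its length
// so that it also covers the capacity of the newly added region.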
ysr@777 421 arrayOop obj = (arrayOop)(bottom());
ysr@777 422 obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
ysr@777 423 set_end(cont->end());
ysr@777 424 set_top(cont->end());
ysr@777 425 }
ysr@777 426
ysr@777 427 void HeapRegion::save_marks() {
ysr@777 428 set_saved_mark();
ysr@777 429 }
ysr@777 430
ysr@777 431 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
ysr@777 432 HeapWord* p = mr.start();
ysr@777 433 HeapWord* e = mr.end();
ysr@777 434 oop obj;
ysr@777 435 while (p < e) {
ysr@777 436 obj = oop(p);
ysr@777 437 p += obj->oop_iterate(cl);
ysr@777 438 }
ysr@777 439 assert(p == e, "bad memregion: doesn't end on obj boundary");
ysr@777 440 }
ysr@777 441
ysr@777 442 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
ysr@777 443 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ysr@777 444 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
ysr@777 445 }
ysr@777 446 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
ysr@777 447
ysr@777 448
ysr@777 449 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
ysr@777 450 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
ysr@777 451 }
ysr@777 452
ysr@777 453 #ifdef DEBUG
ysr@777 454 HeapWord* HeapRegion::allocate(size_t size) {
ysr@777 455 jint state = zero_fill_state();
ysr@777 456 assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
ysr@777 457 zero_fill_is_allocated(),
ysr@777 458 "When ZF is on, only alloc in ZF'd regions");
ysr@777 459 return G1OffsetTableContigSpace::allocate(size);
ysr@777 460 }
ysr@777 461 #endif
ysr@777 462
ysr@777 463 void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
ysr@777 464 assert(top() == bottom() || zfs == Allocated,
ysr@777 465 "Region must be empty, or we must be setting it to allocated.");
ysr@777 466 assert(ZF_mon->owned_by_self() ||
ysr@777 467 Universe::heap()->is_gc_active(),
ysr@777 468 "Must hold the lock or be a full GC to modify.");
ysr@777 469 _zfs = zfs;
ysr@777 470 }
ysr@777 471
ysr@777 472 void HeapRegion::set_zero_fill_complete() {
ysr@777 473 set_zero_fill_state_work(ZeroFilled);
ysr@777 474 if (ZF_mon->owned_by_self()) {
ysr@777 475 ZF_mon->notify_all();
ysr@777 476 }
ysr@777 477 }
ysr@777 478
ysr@777 479
ysr@777 480 void HeapRegion::ensure_zero_filled() {
ysr@777 481 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
ysr@777 482 ensure_zero_filled_locked();
ysr@777 483 }
ysr@777 484
ysr@777 485 void HeapRegion::ensure_zero_filled_locked() {
ysr@777 486 assert(ZF_mon->owned_by_self(), "Precondition");
ysr@777 487 bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
ysr@777 488 assert(should_ignore_zf || Heap_lock->is_locked(),
ysr@777 489 "Either we're in a GC or we're allocating a region.");
ysr@777 490 switch (zero_fill_state()) {
ysr@777 491 case HeapRegion::NotZeroFilled:
ysr@777 492 set_zero_fill_in_progress(Thread::current());
ysr@777 493 {
ysr@777 494 ZF_mon->unlock();
ysr@777 495 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@777 496 ZF_mon->lock_without_safepoint_check();
ysr@777 497 }
ysr@777 498 // A trap.
ysr@777 499 guarantee(zero_fill_state() == HeapRegion::ZeroFilling
ysr@777 500 && zero_filler() == Thread::current(),
ysr@777 501 "AHA! Tell Dave D if you see this...");
ysr@777 502 set_zero_fill_complete();
ysr@777 503 // gclog_or_tty->print_cr("Did sync ZF.");
ysr@777 504 ConcurrentZFThread::note_sync_zfs();
ysr@777 505 break;
ysr@777 506 case HeapRegion::ZeroFilling:
ysr@777 507 if (should_ignore_zf) {
ysr@777 508 // We can "break" the lock and take over the work.
ysr@777 509 Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ysr@777 510 set_zero_fill_complete();
ysr@777 511 ConcurrentZFThread::note_sync_zfs();
ysr@777 512 break;
ysr@777 513 } else {
ysr@777 514 ConcurrentZFThread::wait_for_ZF_completed(this);
ysr@777 515 }
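// Intentional fall-through: once the wait returns, the region has
// been zero filled, so the ZeroFilled case below is a no-op.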
ysr@777 516 case HeapRegion::ZeroFilled:
ysr@777 517 // Nothing to do.
ysr@777 518 break;
ysr@777 519 case HeapRegion::Allocated:
ysr@777 520 guarantee(false, "Should not call on allocated regions.");
ysr@777 521 }
ysr@777 522 assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
ysr@777 523 }
ysr@777 524
ysr@777 525 HeapWord*
ysr@777 526 HeapRegion::object_iterate_mem_careful(MemRegion mr,
ysr@777 527 ObjectClosure* cl) {
ysr@777 528 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 529 // We used to use "block_start_careful" here. But we're actually happy
ysr@777 530 // to update the BOT while we do this...
ysr@777 531 HeapWord* cur = block_start(mr.start());
ysr@777 532 mr = mr.intersection(used_region());
ysr@777 533 if (mr.is_empty()) return NULL;
ysr@777 534 // Otherwise, find the obj that extends onto mr.start().
ysr@777 535
ysr@777 536 assert(cur <= mr.start()
ysr@1280 537 && (oop(cur)->klass_or_null() == NULL ||
ysr@777 538 cur + oop(cur)->size() > mr.start()),
ysr@777 539 "postcondition of block_start");
ysr@777 540 oop obj;
ysr@777 541 while (cur < mr.end()) {
ysr@777 542 obj = oop(cur);
ysr@1280 543 if (obj->klass_or_null() == NULL) {
ysr@777 544 // Ran into an unparseable point.
ysr@777 545 return cur;
ysr@777 546 } else if (!g1h->is_obj_dead(obj)) {
ysr@777 547 cl->do_object(obj);
ysr@777 548 }
ysr@777 549 if (cl->abort()) return cur;
ysr@777 550 // The check above must occur before the operation below, since an
ysr@777 551 // abort might invalidate the "size" operation.
ysr@777 552 cur += obj->size();
ysr@777 553 }
ysr@777 554 return NULL;
ysr@777 555 }
ysr@777 556
ysr@777 557 HeapWord*
ysr@777 558 HeapRegion::
ysr@777 559 oops_on_card_seq_iterate_careful(MemRegion mr,
ysr@777 560 FilterOutOfRegionClosure* cl) {
ysr@777 561 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 562
ysr@777 563 // If we're within a stop-world GC, then we might look at a card in a
ysr@777 564 // GC alloc region that extends onto a GC LAB, which may not be
ysr@777 565 // parseable. Stop the iteration at the "saved_mark" of the region.
ysr@777 566 if (G1CollectedHeap::heap()->is_gc_active()) {
ysr@777 567 mr = mr.intersection(used_region_at_save_marks());
ysr@777 568 } else {
ysr@777 569 mr = mr.intersection(used_region());
ysr@777 570 }
ysr@777 571 if (mr.is_empty()) return NULL;
ysr@777 572 // Otherwise, find the obj that extends onto mr.start().
ysr@777 573
ysr@777 574 // We used to use "block_start_careful" here. But we're actually happy
ysr@777 575 // to update the BOT while we do this...
ysr@777 576 HeapWord* cur = block_start(mr.start());
ysr@777 577 assert(cur <= mr.start(), "Postcondition");
ysr@777 578
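// Walk forward from the block start to the first object that actually
// extends onto mr.start(), bailing out if we reach an unparseable point.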
ysr@777 579 while (cur <= mr.start()) {
ysr@1280 580 if (oop(cur)->klass_or_null() == NULL) {
ysr@777 581 // Ran into an unparseable point.
ysr@777 582 return cur;
ysr@777 583 }
ysr@777 584 // Otherwise...
ysr@777 585 int sz = oop(cur)->size();
ysr@777 586 if (cur + sz > mr.start()) break;
ysr@777 587 // Otherwise, go on.
ysr@777 588 cur = cur + sz;
ysr@777 589 }
ysr@777 590 oop obj;
ysr@777 591 obj = oop(cur);
ysr@777 592 // If we finish this loop...
ysr@777 593 assert(cur <= mr.start()
ysr@1280 594 && obj->klass_or_null() != NULL
ysr@777 595 && cur + obj->size() > mr.start(),
ysr@777 596 "Loop postcondition");
ysr@777 597 if (!g1h->is_obj_dead(obj)) {
ysr@777 598 obj->oop_iterate(cl, mr);
ysr@777 599 }
ysr@777 600
ysr@777 601 HeapWord* next;
ysr@777 602 while (cur < mr.end()) {
ysr@777 603 obj = oop(cur);
ysr@1280 604 if (obj->klass_or_null() == NULL) {
ysr@777 605 // Ran into an unparseable point.
ysr@777 606 return cur;
ysr@777 607 }
ysr@777 608 // Otherwise:
ysr@777 609 next = (cur + obj->size());
ysr@777 610 if (!g1h->is_obj_dead(obj)) {
ysr@777 611 if (next < mr.end()) {
ysr@777 612 obj->oop_iterate(cl);
ysr@777 613 } else {
ysr@777 614 // this obj spans the boundary. If it's an array, stop at the
ysr@777 615 // boundary.
ysr@777 616 if (obj->is_objArray()) {
ysr@777 617 obj->oop_iterate(cl, mr);
ysr@777 618 } else {
ysr@777 619 obj->oop_iterate(cl);
ysr@777 620 }
ysr@777 621 }
ysr@777 622 }
ysr@777 623 cur = next;
ysr@777 624 }
ysr@777 625 return NULL;
ysr@777 626 }
ysr@777 627
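// Flags printed below: HS/HC = starts/continues humongous, CS = in the
// collection set, A = GC alloc region, SO/SU/Y = scan-only/survivor/
// young, F = free (empty); followed by the region's GC time stamp.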
ysr@777 628 void HeapRegion::print() const { print_on(gclog_or_tty); }
ysr@777 629 void HeapRegion::print_on(outputStream* st) const {
ysr@777 630 if (isHumongous()) {
ysr@777 631 if (startsHumongous())
ysr@777 632 st->print(" HS");
ysr@777 633 else
ysr@777 634 st->print(" HC");
ysr@777 635 } else {
ysr@777 636 st->print(" ");
ysr@777 637 }
ysr@777 638 if (in_collection_set())
ysr@777 639 st->print(" CS");
ysr@777 640 else if (is_gc_alloc_region())
ysr@777 641 st->print(" A ");
ysr@777 642 else
ysr@777 643 st->print(" ");
ysr@777 644 if (is_young())
ysr@777 645 st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
ysr@777 646 else
ysr@777 647 st->print(" ");
ysr@777 648 if (is_empty())
ysr@777 649 st->print(" F");
ysr@777 650 else
ysr@777 651 st->print(" ");
ysr@777 652 st->print(" %d", _gc_time_stamp);
ysr@777 653 G1OffsetTableContigSpace::print_on(st);
ysr@777 654 }
ysr@777 655
tonyp@1246 656 void HeapRegion::verify(bool allow_dirty) const {
tonyp@1246 657 verify(allow_dirty, /* use_prev_marking */ true);
tonyp@1246 658 }
tonyp@1246 659
ysr@777 660 #define OBJ_SAMPLE_INTERVAL 0
ysr@777 661 #define BLOCK_SAMPLE_INTERVAL 100
ysr@777 662
ysr@777 663 // This really ought to be commoned up into OffsetTableContigSpace somehow.
ysr@777 664 // We would need a mechanism to make that code skip dead objects.
ysr@777 665
tonyp@1246 666 void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
ysr@777 667 G1CollectedHeap* g1 = G1CollectedHeap::heap();
ysr@777 668 HeapWord* p = bottom();
ysr@777 669 HeapWord* prev_p = NULL;
ysr@777 670 int objs = 0;
ysr@777 671 int blocks = 0;
tonyp@1246 672 VerifyLiveClosure vl_cl(g1, use_prev_marking);
ysr@777 673 while (p < top()) {
ysr@777 674 size_t size = oop(p)->size();
ysr@777 675 if (blocks == BLOCK_SAMPLE_INTERVAL) {
ysr@777 676 guarantee(p == block_start_const(p + (size/2)),
ysr@777 677 "check offset computation");
ysr@777 678 blocks = 0;
ysr@777 679 } else {
ysr@777 680 blocks++;
ysr@777 681 }
ysr@777 682 if (objs == OBJ_SAMPLE_INTERVAL) {
ysr@777 683 oop obj = oop(p);
tonyp@1246 684 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
ysr@777 685 obj->verify();
ysr@777 686 vl_cl.set_containing_obj(obj);
ysr@777 687 obj->oop_iterate(&vl_cl);
ysr@777 688 if (G1MaxVerifyFailures >= 0
ysr@777 689 && vl_cl.n_failures() >= G1MaxVerifyFailures) break;
ysr@777 690 }
ysr@777 691 objs = 0;
ysr@777 692 } else {
ysr@777 693 objs++;
ysr@777 694 }
ysr@777 695 prev_p = p;
ysr@777 696 p += size;
ysr@777 697 }
ysr@777 698 HeapWord* rend = end();
ysr@777 699 HeapWord* rtop = top();
ysr@777 700 if (rtop < rend) {
ysr@777 701 guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop,
ysr@777 702 "check offset computation");
ysr@777 703 }
ysr@777 704 if (vl_cl.failures()) {
ysr@777 705 gclog_or_tty->print_cr("Heap:");
tonyp@1273 706 G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */);
ysr@777 707 gclog_or_tty->print_cr("");
ysr@777 708 }
johnc@1186 709 if (VerifyDuringGC &&
ysr@777 710 G1VerifyConcMarkPrintReachable &&
ysr@777 711 vl_cl.failures()) {
ysr@777 712 g1->concurrent_mark()->print_prev_bitmap_reachable();
ysr@777 713 }
tonyp@1180 714 guarantee(!vl_cl.failures(), "region verification failed");
ysr@777 715 guarantee(p == top(), "end of last object must match end of space");
ysr@777 716 }
ysr@777 717
ysr@777 718 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
ysr@777 719 // away eventually.
ysr@777 720
tonyp@791 721 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
ysr@777 722 // false ==> we'll do the clearing if there's clearing to be done.
tonyp@791 723 ContiguousSpace::initialize(mr, false, mangle_space);
ysr@777 724 _offsets.zero_bottom_entry();
ysr@777 725 _offsets.initialize_threshold();
tonyp@791 726 if (clear_space) clear(mangle_space);
ysr@777 727 }
ysr@777 728
tonyp@791 729 void G1OffsetTableContigSpace::clear(bool mangle_space) {
tonyp@791 730 ContiguousSpace::clear(mangle_space);
ysr@777 731 _offsets.zero_bottom_entry();
ysr@777 732 _offsets.initialize_threshold();
ysr@777 733 }
ysr@777 734
ysr@777 735 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
ysr@777 736 Space::set_bottom(new_bottom);
ysr@777 737 _offsets.set_bottom(new_bottom);
ysr@777 738 }
ysr@777 739
ysr@777 740 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
ysr@777 741 Space::set_end(new_end);
ysr@777 742 _offsets.resize(new_end - bottom());
ysr@777 743 }
ysr@777 744
ysr@777 745 void G1OffsetTableContigSpace::print() const {
ysr@777 746 print_short();
ysr@777 747 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
ysr@777 748 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
ysr@777 749 bottom(), top(), _offsets.threshold(), end());
ysr@777 750 }
ysr@777 751
ysr@777 752 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
ysr@777 753 return _offsets.initialize_threshold();
ysr@777 754 }
ysr@777 755
ysr@777 756 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
ysr@777 757 HeapWord* end) {
ysr@777 758 _offsets.alloc_block(start, end);
ysr@777 759 return _offsets.threshold();
ysr@777 760 }
ysr@777 761
ysr@777 762 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@777 763 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 764 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
ysr@777 765 if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@777 766 return top();
ysr@777 767 else
ysr@777 768 return ContiguousSpace::saved_mark_word();
ysr@777 769 }
ysr@777 770
ysr@777 771 void G1OffsetTableContigSpace::set_saved_mark() {
ysr@777 772 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 773 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
ysr@777 774
ysr@777 775 if (_gc_time_stamp < curr_gc_time_stamp) {
ysr@777 776 // The order of these is important, as another thread might be
ysr@777 777 // about to start scanning this region. If it does so after
ysr@777 778 // set_saved_mark and before _gc_time_stamp = ..., it will see a
ysr@777 779 // stale time stamp, and it will pick up top() as the high water mark
ysr@777 780 // of the region. If it does so after _gc_time_stamp = ..., then it
ysr@777 781 // will pick up the right saved_mark_word() as the high water mark
ysr@777 782 // of the region. Either way, the behaviour will be correct.
ysr@777 783 ContiguousSpace::set_saved_mark();
ysr@1280 784 OrderAccess::storestore();
iveresov@788 785 _gc_time_stamp = curr_gc_time_stamp;
ysr@1280 786 // The following fence is to force a flush of the writes above, but
ysr@1280 787 // is strictly not needed because when an allocating worker thread
ysr@1280 788 // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
ysr@1280 789 // when the lock is released, the write will be flushed.
ysr@1280 790 // OrderAccess::fence();
ysr@777 791 }
ysr@777 792 }
ysr@777 793
ysr@777 794 G1OffsetTableContigSpace::
ysr@777 795 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 796 MemRegion mr, bool is_zeroed) :
ysr@777 797 _offsets(sharedOffsetArray, mr),
ysr@777 798 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
ysr@777 799 _gc_time_stamp(0)
ysr@777 800 {
ysr@777 801 _offsets.set_space(this);
tonyp@791 802 initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
ysr@777 803 }
ysr@777 804
ysr@777 805 size_t RegionList::length() {
ysr@777 806 size_t len = 0;
ysr@777 807 HeapRegion* cur = hd();
ysr@777 808 DEBUG_ONLY(HeapRegion* last = NULL);
ysr@777 809 while (cur != NULL) {
ysr@777 810 len++;
ysr@777 811 DEBUG_ONLY(last = cur);
ysr@777 812 cur = get_next(cur);
ysr@777 813 }
ysr@777 814 assert(last == tl(), "Invariant");
ysr@777 815 return len;
ysr@777 816 }
ysr@777 817
ysr@777 818 void RegionList::insert_before_head(HeapRegion* r) {
ysr@777 819 assert(well_formed(), "Inv");
ysr@777 820 set_next(r, hd());
ysr@777 821 _hd = r;
ysr@777 822 _sz++;
ysr@777 823 if (tl() == NULL) _tl = r;
ysr@777 824 assert(well_formed(), "Inv");
ysr@777 825 }
ysr@777 826
ysr@777 827 void RegionList::prepend_list(RegionList* new_list) {
ysr@777 828 assert(well_formed(), "Precondition");
ysr@777 829 assert(new_list->well_formed(), "Precondition");
ysr@777 830 HeapRegion* new_tl = new_list->tl();
ysr@777 831 if (new_tl != NULL) {
ysr@777 832 set_next(new_tl, hd());
ysr@777 833 _hd = new_list->hd();
ysr@777 834 _sz += new_list->sz();
ysr@777 835 if (tl() == NULL) _tl = new_list->tl();
ysr@777 836 } else {
ysr@777 837 assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
ysr@777 838 }
ysr@777 839 assert(well_formed(), "Inv");
ysr@777 840 }
ysr@777 841
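// Unlinks and drops the node immediately after r; r must have a
// successor, otherwise the get_next(next) below would fault.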
ysr@777 842 void RegionList::delete_after(HeapRegion* r) {
ysr@777 843 assert(well_formed(), "Precondition");
ysr@777 844 HeapRegion* next = get_next(r);
ysr@777 845 assert(r != NULL, "Precondition");
ysr@777 846 HeapRegion* next_tl = get_next(next);
ysr@777 847 set_next(r, next_tl);
ysr@777 848 dec_sz();
ysr@777 849 if (next == tl()) {
ysr@777 850 assert(next_tl == NULL, "Inv");
ysr@777 851 _tl = r;
ysr@777 852 }
ysr@777 853 assert(well_formed(), "Inv");
ysr@777 854 }
ysr@777 855
ysr@777 856 HeapRegion* RegionList::pop() {
ysr@777 857 assert(well_formed(), "Inv");
ysr@777 858 HeapRegion* res = hd();
ysr@777 859 if (res != NULL) {
ysr@777 860 _hd = get_next(res);
ysr@777 861 _sz--;
ysr@777 862 set_next(res, NULL);
ysr@777 863 if (sz() == 0) _tl = NULL;
ysr@777 864 }
ysr@777 865 assert(well_formed(), "Inv");
ysr@777 866 return res;
ysr@777 867 }
