Mon, 09 Aug 2010 05:41:05 -0700
6966222: G1: simplify TaskQueue overflow handling
Reviewed-by: tonyp, ysr
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes        = 0;
int HeapRegion::GrainWords        = 0;
int HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  bool _use_prev_marking;
public:
  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
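
// Note (general G1 background, not stated in this file): the "prev"
// marking information is the result of the last completed concurrent
// marking cycle, while "next" is the marking currently in progress.
// The closure applies is_obj_dead_cond() consistently against whichever
// view was requested, so a single verification pass never mixes
// liveness answers from the two bitmaps.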

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters,
    // so that walk_mem_region_loop is instantiated with the concrete
    // closure type and the per-oop calls can be dispatched statically.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER  2048

void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (int) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
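
// A worked example of the sizing above (hypothetical flag values, for
// illustration only): with -Xms512m and G1HeapRegionSize left at its
// default, the initial guess is MAX2(512M / 2048, 1M) = 1M, which is
// already a power of 2 and within [MIN_REGION_SIZE, MAX_REGION_SIZE],
// so GrainBytes becomes 1M and LogOfHRGrainBytes becomes 20. With
// -Xms8g the guess is 8G / 2048 = 4M, again a power of 2, giving
// LogOfHRGrainBytes == 22.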

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

// <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>

void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
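
// How this is intended to be used (an illustrative sketch, not code
// from this change; the claim value is hypothetical):
//
//   if (hr->claimHeapRegion(SomeClaimValue)) {
//     process(hr);   // exactly one racing thread gets here
//   }
//
// The single cmpxchg guarantees that at most one caller observes the
// old value and returns true; every other thread either sees the region
// already claimed or loses the race, and returns false.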

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
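
// Why the "+1" bias matters, with hypothetical word addresses: suppose
// the search has narrowed to low == 100 and high == 101, and 101 is the
// block start we are after. Without the bias, middle = low + diff/2
// == 100 == low, so the "low = middle" branch would make no progress.
// With the bias, middle = low + (diff+1)/2 == 101 == high, and the
// loop returns high.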

void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}

void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}

class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}
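
// In other words (a restatement of the closure above): the next
// compaction space is the first non-humongous region that follows this
// one in heap order. Humongous regions are skipped, presumably because
// each is dedicated to (part of) a single large object rather than
// being a general destination for compacted objects.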

void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}

void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}
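
// The arithmetic above, concretely (a sketch assuming a hypothetical 1M
// region size): capacity() is in bytes and jintSize is sizeof(jint),
// i.e. 4, so appending one 1M continuation region grows the length
// field of the array at bottom() by 1M / 4 = 262144 elements. This
// keeps the size computed from the object header in step with the
// extended end() and top().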

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)      \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
  jint state = zero_fill_state();
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif

void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  if (top() != bottom() && zfs != Allocated) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}

void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}

void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}

void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // Deliberate fall-through: once the wait above returns, the region
    // is ZeroFilled and there is nothing more to do.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
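
// The states used above form a simple lifecycle (as summarized from
// this file; transitions happen under ZF_mon):
//
//   NotZeroFilled --> ZeroFilling --> ZeroFilled --> Allocated
//
// ensure_zero_filled_locked() either drives the first two transitions
// itself or waits for the concurrent zero-fill thread and falls through
// into the ZeroFilled case; Allocated regions must never reach this
// path at all.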

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // This obj spans the boundary. If it's an objArray, stop at the
        // boundary: the elements beyond it will be visited when the
        // cards covering them are scanned. A non-array object cannot be
        // processed piecewise, so iterate it in full.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify(bool allow_dirty) const {
  bool dummy = false;
  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100
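
// How the sampling constants are used below, with one hypothetical
// number: verify() checks the block offset table on every
// BLOCK_SAMPLE_INTERVAL-th object and verifies object contents on every
// OBJ_SAMPLE_INTERVAL-th one. Since OBJ_SAMPLE_INTERVAL is 0, every
// object is currently verified; raising it to, say, 16 would verify
// only one object in every 17.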

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(bool allow_dirty,
                        bool use_prev_marking,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1, use_prev_marking);
  while (p < top()) {
    size_t size = oop(p)->size();
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      HeapWord* res = block_start_const(p + (size/2));
      if (p != res) {
        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
                               SIZE_FORMAT" returned "PTR_FORMAT,
                               p, size, res);
        *failures = true;
        return;
      }
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
        if (obj->is_oop()) {
          klassOop klass = obj->klass();
          if (!klass->is_perm()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not in perm", klass, obj);
            *failures = true;
            return;
          } else if (!klass->is_klass()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not a klass", klass, obj);
            *failures = true;
            return;
          } else {
            vl_cl.set_containing_obj(obj);
            obj->oop_iterate(&vl_cl);
            if (vl_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vl_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          }
        } else {
          gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
          *failures = true;
          return;
        }
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
    if (res != rtop) {
      gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
                             PTR_FORMAT" returned "PTR_FORMAT,
                             rtop, rend, res);
      *failures = true;
      return;
    }
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., it will still
    // see the stale time stamp and pick up top() as the high water
    // mark of the region. If it does so after _gc_time_stamp = ...,
    // then it will pick up the freshly set saved_mark_word() as the
    // high water mark of the region. Either way, the behaviour will
    // be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}
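
// The interleaving the comment above guards against, spelled out as a
// sketch (thread names are illustrative): writer W is in
// set_saved_mark() while reader R calls saved_mark_word().
//
//   W: ContiguousSpace::set_saved_mark();  // publish the mark
//   W: OrderAccess::storestore();          // order the two stores
//   W: _gc_time_stamp = curr_gc_time_stamp;
//
// If R still loads the old _gc_time_stamp, it conservatively returns
// top(); if it loads the new stamp, the storestore barrier ensures the
// freshly set saved mark is visible too. Either way R obtains a correct
// high water mark.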

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}

size_t RegionList::length() {
  size_t len = 0;
  HeapRegion* cur = hd();
  DEBUG_ONLY(HeapRegion* last = NULL);
  while (cur != NULL) {
    len++;
    DEBUG_ONLY(last = cur);
    cur = get_next(cur);
  }
  assert(last == tl(), "Invariant");
  return len;
}

void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}

void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}

void RegionList::delete_after(HeapRegion* r) {
  assert(well_formed(), "Precondition");
  assert(r != NULL, "Precondition");
  HeapRegion* next = get_next(r);
  HeapRegion* next_tl = get_next(next);
  set_next(r, next_tl);
  dec_sz();
  if (next == tl()) {
    assert(next_tl == NULL, "Inv");
    _tl = r;
  }
  assert(well_formed(), "Inv");
}

HeapRegion* RegionList::pop() {
  assert(well_formed(), "Inv");
  HeapRegion* res = hd();
  if (res != NULL) {
    _hd = get_next(res);
    _sz--;
    set_next(res, NULL);
    if (sz() == 0) _tl = NULL;
  }
  assert(well_formed(), "Inv");
  return res;
}
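
// A usage sketch for the list primitives above (illustrative only; it
// assumes regions r1 and r2 and a default-constructed RegionList):
//
//   RegionList list;
//   list.insert_before_head(r1);   // list: r1
//   list.insert_before_head(r2);   // list: r2 -> r1
//   HeapRegion* r = list.pop();    // returns r2; list: r1
//
// Each primitive re-checks well_formed() on entry and exit in debug
// builds, so the head/tail/size invariants are validated per mutation.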