Tue, 05 Jun 2012 22:30:24 +0200
7172388: G1: _total_full_collections should not be incremented for concurrent cycles
Reviewed-by: azeemj, jmasa

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;
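
// HeapRegionDCTOC is G1's "dirty card to oop closure": it applies an oop
// closure to the objects covered by a dirty card in a region, optionally
// wrapping the closure in one of the filters selected via FilterKind.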
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{ }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();
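
          // A reference is deemed properly recorded if the source region
          // is young (young regions are scanned in their entirety), if the
          // destination region's remembered set contains the field, or, when
          // the update log buffers were not flushed for this verification,
          // if the corresponding card is still dirty and would therefore
          // deliver the entry later.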
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
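
// Applies "cl" to every live object that lies entirely below "top" and
// returns the address of the first object that extends to or beyond top;
// the caller applies the closure to that final object with a
// MemRegion-bounded iterate. The closure type is a template parameter so
// that each filter kind gets a statically-typed instance of this loop
// (see the replicated switch in walk_mem_region_with_cl below).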
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  OopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE     (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE     ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048
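
// As an illustration of the calculation below: with -Xms2g the automatic
// path yields 2g / 2048 = 1M regions, and with -Xms16g it yields 8M; with
// -Xms256m the raw 128K result is rounded down to a power of 2 and then
// clamped up to the 1M minimum.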
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();
  _gc_efficiency = (double) reclaimable_bytes() /
                   g1p->predict_region_elapsed_time_ms(this, false);
}
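
// A humongous object spans a sequence of contiguous regions: the first is
// tagged StartsHumongous (with its end() extended up to the end of the last
// region in the series), and every subsequent region is tagged
// ContinuesHumongous and points back to the starts-humongous region.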
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
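
// Claims this region by installing claimValue in _claimed via CAS.
// Returns true only if this thread's CAS succeeded; if the region already
// carries claimValue (claimed by another thread) the result is false.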
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr, bool is_zeroed) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
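
// Used by next_compaction_space() below: finds the first non-humongous
// region that follows "target" in the heap's region iteration order.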
class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);         \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
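
// Applies "cl" to the oops on the portion of this region covered by one
// card (mr). Returns NULL when the card has been fully processed (or
// deliberately skipped, e.g. for young regions when filter_young is set);
// a non-NULL result is the address of an unparseable point, signalling
// to the caller that the card was not fully processed.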
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
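    // Rationale: once the clean is globally visible before our reads, any
    // field update we fail to observe must have happened after the clean;
    // the mutator's post-write barrier will re-dirty the card, so the
    // update is re-examined by a later refinement pass rather than lost.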
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        klassOop klass = obj->klass();
        if (!klass->is_perm()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not in perm", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
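
// The GC time stamp protocol (see also set_saved_mark() below): a region
// whose time stamp is behind the heap's current stamp has not had a saved
// mark recorded during the current GC, so top() serves as its high water
// mark; otherwise the explicitly saved mark is used.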
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., it will see a
    // stale time stamp and pick up top() as the high water mark of
    // the region. If it does so after _gc_time_stamp = ..., it will
    // pick up the right saved_mark_word() as the high water mark of
    // the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}