Wed, 11 Sep 2013 16:25:02 +0200
8010722: assert: failed: heap size is too big for compressed oops
Summary: Take conservative assumptions about the alignment required by the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}
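
// Note that the loop above deliberately stops at the first object whose
// end reaches or crosses top: that last object is handled separately by
// the caller, which applies the MemRegion-bounded oop_iterate to it (see
// HeapRegionDCTOC::walk_mem_region_with_cl below).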

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters,
    // so that walk_mem_region_loop is instantiated with the concrete
    // closure type and the filter's calls can be inlined rather than
    // dispatched virtually through cl2.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048

size_t HeapRegion::max_region_size() {
  return (size_t)MAX_REGION_SIZE;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  if (region_size != G1HeapRegionSize) {
    // Update the flag to make sure that PrintFlagsFinal logs the correct value
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
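
// Worked example of the ergonomics above (assuming the default 512-byte
// cards, i.e. card_shift == 9): with -Xms4g -Xmx8g the average heap size
// is 6G, and 6G / TARGET_REGION_NUMBER = 3M, which log2_long rounds down
// to the 2M power of two. That lies within [1M, 32M], so GrainBytes
// becomes 2M and CardsPerRegion = 2M >> 9 = 4096.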

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
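
// For example, a region with 8M of reclaimable bytes and a predicted
// mixed-gc evacuation time of 2.0 ms has an efficiency of 4M bytes/ms;
// collection-set candidates with higher efficiency are more attractive
// because they reclaim more space per unit of pause time.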

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
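
// A sketch of the intended use (claim values such as InitialClaimValue,
// used above, are defined in heapRegion.hpp): each parallel worker tries
// to claim a region before processing it, and only the single worker
// whose cmpxchg succeeds proceeds, e.g.
//
//   if (hr->claimHeapRegion(claim_value)) {
//     // this worker now owns the region for the current phase
//   }
//
// A thread that finds the region already claimed with the same value, or
// that loses the cmpxchg race, simply gets false back and moves on.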

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
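
// To see why the bias is needed, consider high == low + 1 (diff == 1):
// with the +1, middle == high and the loop returns; without it, middle
// would equal low and the search could make no progress.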

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
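
// A non-NULL return value above marks an unparseable point: the caller
// treats the card as not fully processed (for instance by deferring it),
// whereas NULL means the whole card range was scanned.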

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::migrate_strong_code_roots() {
  assert(in_collection_set(), "only collection set regions");
  assert(!isHumongous(), "not humongous regions");

  HeapRegionRemSet* hrrs = rem_set();
  hrrs->migrate_strong_code_roots();
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  int strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}