/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "gc_implementation/g1/heapRegionTracer.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
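// Illustrative worked example (not part of the original source; the bound and
// target values below are assumptions about typical HeapRegionBounds defaults,
// i.e. 1M minimum, 32M maximum and a target of 2048 regions): with -Xms2g
// -Xmx8g and no explicit -XX:G1HeapRegionSize,
//
//   average_heap_size = (2G + 8G) / 2        = 5G
//   region_size       = MAX2(5G / 2048, 1M)  = 2560K
//   region_size_log   = log2_long(2560K)     = 21
//   region_size       = 1 << 21              = 2M   (already within [1M, 32M])
//
// so GrainBytes becomes 2M, GrainWords is 2M / HeapWordSize, and with 512-byte
// cards CardsPerRegion = 2M >> card_shift = 4096.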
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
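// Worked example (illustrative, not from the original source): a region with
// 6M of reclaimable_bytes() whose evacuation in a mixed GC is predicted to
// take 3.0 ms gets _gc_efficiency = 6M / 3.0, i.e. roughly 2M of space back
// per predicted millisecond. Higher values mean more space recovered per unit
// of predicted pause time, which is the ordering used when ranking old regions
// as mixed-GC candidates.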
void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
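// Illustrative usage sketch (not part of the original source; the closure and
// claim value names below are hypothetical): parallel workers can race on the
// claim so that each region is processed by exactly one thread.
//
//   void ExampleRegionClosure::doHeapRegion(HeapRegion* hr) {
//     if (hr->claimHeapRegion(ExampleClaimValue)) {
//       process(hr);   // only the worker whose cmpxchg succeeded gets here
//     }
//   }
//
// A caller loses the claim either because another thread already installed
// claimValue, or because _claimed changed between the initial read and the
// cmpxchg.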
HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: " SIZE_FORMAT " used: " SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}
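// Descriptive note (added for clarity, not in the original source): the
// iterator above returns NULL when every object overlapping the clipped
// MemRegion was visited, and otherwise returns the address of the first
// unparseable block (one whose klass field is not yet visible), letting the
// caller distinguish a complete scan from one that must be deferred or redone.
// object_iterate_mem_careful() follows the same convention, and additionally
// returns the current address if its closure requests an abort.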
// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's less than top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
                                 "top " PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }

  bool failures() { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
                               PTR_FORMAT " in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
                                 PTR_FORMAT " in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
                                 "failures for nmethod " PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
                             "but has " SIZE_FORMAT " code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
                             "region but has " SIZE_FORMAT " code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print(" ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
class G1VerificationClosure : public OopClosure {
protected:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field " PTR_FORMAT
                                 " of live obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->isHumongous()) {
        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
        jbyte cv_field = *_bs->byte_for_const(p);
        const jbyte dirty = CardTableModRefBS::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                           (_containing_obj->is_objArray() ?
                              cv_field == dirty
                            : cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            gclog_or_tty->cr();
            gclog_or_tty->print_cr("----------");
          }
          gclog_or_tty->print_cr("Missing rem set entry:");
          gclog_or_tty->print_cr("Field " PTR_FORMAT " "
                                 "of obj " PTR_FORMAT ", "
                                 "in region " HR_FORMAT,
                                 p, (void*) _containing_obj,
                                 HR_FORMAT_PARAMS(from));
          _containing_obj->print_on(gclog_or_tty);
          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
                                 "in region " HR_FORMAT,
                                 (void*) obj,
                                 HR_FORMAT_PARAMS(to));
          if (obj->is_oop()) {
            obj->print_on(gclog_or_tty);
          }
          gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                 cv_obj, cv_field);
          gclog_or_tty->print_cr("----------");
          gclog_or_tty->flush();
          _failures = true;
          if (!failed) _n_failures++;
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  VerifyRemSetClosure vr_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj " PTR_FORMAT " is of %shumongous size ("
                             SIZE_FORMAT " words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1->full_collection() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate_no_header(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate_no_header(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                           "does not match top " PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is humongous "
                           "but has " SIZE_FORMAT " objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate_no_header(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}
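// Illustrative sketch of how the time stamp and _scan_top cooperate (not part
// of the original source; the variable names below are hypothetical). During a
// pause, oops_on_card_seq_iterate_careful() above clips its card scan with
// scan_top():
//
//   HeapWord* limit = region->scan_top();
//   MemRegion safe = card_mr.intersection(MemRegion(region->bottom(), limit));
//   // only [bottom, limit) is guaranteed parseable while the GC is active
//
// If this region's _gc_time_stamp is older than the heap's current stamp, no
// GC allocation has happened here during the current pause and top() is
// returned as the limit. Otherwise the reader falls back to _scan_top, which
// stays at bottom() for ordinary GC alloc regions and is raised to top() by
// record_retained_region() for the retained old alloc region.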
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}