Wed, 18 Apr 2012 07:21:15 -0400
7157073: G1: type change size_t -> uint for region counts / indexes
Summary: Change the type of fields / variables / etc. that represent region counts and indexes from size_t to uint.
Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
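
As an illustrative sketch of the shape of the change (the "before" line is a
reconstruction for illustration, not quoted from the old revision; the "after"
form is the HeapRegion constructor visible in heapRegion.cpp below):

  // before: region indexes / counts carried as size_t
  HeapRegion(size_t hrs_index, ...);
  // after: a region index / count always fits in 32 bits
  HeapRegion(uint hrs_index, ...);
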
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{ }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();
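
          // A missing rem set entry is tolerable if the source region is
          // young (young regions are always collected in full, so they keep
          // no rem set entries), or if the relevant card is still dirty and
          // therefore has yet to be refined into the rem set.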
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                             (_containing_obj->is_objArray() ?
                                cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  OopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
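    // (Replicating it with the statically-typed filter closures lets the
    // walk_mem_region_loop template bind the closure's do_oop calls
    // non-virtually; going through the generic cl2 pointer would cost a
    // virtual call per oop.)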
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048

void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }
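
  // Worked example (illustrative): with -Xms1g the calculation above yields
  // 1g / 2048 = 512k, which is below MIN_REGION_SIZE, so the region size
  // becomes 1m; with -Xms8g it yields 8g / 2048 = 4m, already a power of 2
  // and within bounds. With the default 512-byte cards (card_shift == 9),
  // a 1m region is then covered by 2048 cards (see CardsPerRegion below).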
  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();
  _gc_efficiency = (double) reclaimable_bytes() /
                            g1p->predict_region_elapsed_time_ms(this, false);
}
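
// A humongous object is laid out as one "starts humongous" region followed
// by zero or more "continues humongous" regions. For the first region,
// new_top is the end of the object and new_end the end of its last region.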
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
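
// Claims this region for one worker via a single CAS on _claimed: only the
// thread whose cmpxchg installs claimValue returns true; a thread that sees
// the region already claimed, or that loses the race, returns false.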
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
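    // (Example: with low == high - 1, diff is 1 and (diff+1)/2 is 1, so
    // middle becomes high and the loop can terminate; without the +1,
    // middle would equal low and the search could make no progress.)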
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr, bool is_zeroed) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}

class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    set_top_at_conc_mark_count(bottom());
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    set_top_at_conc_mark_count(bottom());
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
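
// Applies cl to the oops covered by the card's MemRegion. Returns NULL when
// the whole region was processed, or the first unparseable address found,
// telling the caller the card could not be fully processed this time.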
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print(" ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print(" ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print(" ");
  if (is_empty())
    st->print(" F");
  else
    st->print(" ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        klassOop klass = obj->klass();
        if (!klass->is_perm()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not in perm", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of the region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}