src/share/vm/gc_implementation/g1/heapRegion.cpp


author      apetrusenko
date        Mon, 31 Aug 2009 05:27:29 -0700
changeset   1375:8624da129f0b
parent      1301:18f526145aea
child       1377:2c79770d1f6e
permissions -rw-r--r--

6841313: G1: dirty cards of survivor regions in parallel
Reviewed-by: tonyp, iveresov

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl"

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

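// Verification closure: for each reference field of a live object, check
// that the referent is in the heap and not dead, and that any region-crossing
// reference is either recorded in the "to" region's remembered set or is
// excused (young "from" region, or a still-dirty card when the log buffers
// have not been flushed on verification).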
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  bool _use_prev_marking;
public:
  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                        " of live obj "PTR_FORMAT
                        " points to obj "PTR_FORMAT
                        " not in the heap.",
                        p, (void*) _containing_obj, (void*) obj);
        } else {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                        " of live obj "PTR_FORMAT
                        " points to dead obj "PTR_FORMAT".",
                        p, (void*) _containing_obj, (void*) obj);
        }
        gclog_or_tty->print_cr("Live obj:");
        _containing_obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("Bad referent:");
        obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                          " of obj "PTR_FORMAT
                          ", in region %d ["PTR_FORMAT
                          ", "PTR_FORMAT"),",
                          p, (void*) _containing_obj,
                          from->hrs_index(),
                          from->bottom(),
                          from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                          " in region %d ["PTR_FORMAT
                          ", "PTR_FORMAT").",
                          (void*) obj, to->hrs_index(),
                          to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                          cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

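// Walk the objects that lie entirely within [cur, top), applying cl to each
// live one via the non-MemRegion version of oop_iterate. Returns the address
// of the first object that extends up to or beyond top, which the caller
// must handle separately.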
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

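// Apply cl (wrapped in the filter selected by _fk) to the objects in
// [bottom, top). The first and last objects may straddle mr, so they are
// iterated with the MemRegion-bounded version of oop_iterate; objects that
// are dead with respect to the mark bitmap are skipped.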
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

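// GC efficiency is the number of reclaimable (garbage) bytes divided by the
// predicted time (in ms) to collect this region; the collector uses it to
// rank candidate regions for the collection set.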
// <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>

void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}

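// Claim this region with a single CAS attempt. Returns true only if this
// thread's cmpxchg installed claimValue; returns false if the region already
// carried claimValue or if another thread won the race.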
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

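// Binary search in [addr, end()) for the first block start at or after addr.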
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}

void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}

class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}

void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)        \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);                \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
  jint state = zero_fill_state();
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif

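// Zero-fill state machine: NotZeroFilled -> ZeroFilling -> ZeroFilled
// -> Allocated. State changes are made while holding ZF_mon, or by a
// full GC at a safepoint (see the asserts below).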
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(top() == bottom() || zfs == Allocated,
         "Region must be empty, or we must be setting it to allocated.");
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
  _zfs = zfs;
}

void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}

void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}

void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA!  Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // Fall through: after waiting, the region is zero filled.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}

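// Iterate over the live objects intersecting mr. Returns NULL if the whole
// intersection was processed, or else the address at which iteration had to
// stop: either an unparseable object (klass not yet installed) or the point
// at which the closure requested an abort.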
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

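// Version of object iteration used when scanning the objects on a dirty
// card. Returns NULL if every object intersecting mr was processed, or the
// address of an unparseable point if one was encountered.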
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary.  If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print("   ");
  if (is_young())
    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" %d", _gc_time_stamp);
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify(bool allow_dirty) const {
  verify(allow_dirty, /* use_prev_marking */ true);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1, use_prev_marking);
  while (p < top()) {
    size_t size = oop(p)->size();
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
        obj->verify();
        vl_cl.set_containing_obj(obj);
        obj->oop_iterate(&vl_cl);
        if (G1MaxVerifyFailures >= 0
            && vl_cl.n_failures() >= G1MaxVerifyFailures) break;
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop,
              "check offset computation");
  }
  if (vl_cl.failures()) {
    gclog_or_tty->print_cr("Heap:");
    G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */);
    gclog_or_tty->print_cr("");
  }
  if (VerifyDuringGC &&
      G1VerifyConcMarkPrintReachable &&
      vl_cl.failures()) {
    g1->concurrent_mark()->print_prev_bitmap_reachable();
  }
  guarantee(!vl_cl.failures(), "region verification failed");
  guarantee(p == top(), "end of last object must match end of space");
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

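// If this space's time stamp is older than the heap's current GC time stamp,
// no saved mark has been set during this GC, so top() is the safe high water
// mark; otherwise use the mark recorded by set_saved_mark().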
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the time
    // stamp check in saved_mark_word() will fail and the scanner will
    // pick up top() as the high water mark of the region. If it does
    // so after _gc_time_stamp = ..., then it will pick up the right
    // saved_mark_word() as the high water mark of the region. Either
    // way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}

size_t RegionList::length() {
  size_t len = 0;
  HeapRegion* cur = hd();
  DEBUG_ONLY(HeapRegion* last = NULL);
  while (cur != NULL) {
    len++;
    DEBUG_ONLY(last = cur);
    cur = get_next(cur);
  }
  assert(last == tl(), "Invariant");
  return len;
}

void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}

void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}

void RegionList::delete_after(HeapRegion* r) {
  assert(well_formed(), "Precondition");
  // Check the precondition before r is dereferenced by get_next().
  assert(r != NULL, "Precondition");
  HeapRegion* next = get_next(r);
  HeapRegion* next_tl = get_next(next);
  set_next(r, next_tl);
  dec_sz();
  if (next == tl()) {
    assert(next_tl == NULL, "Inv");
    _tl = r;
  }
  assert(well_formed(), "Inv");
}

HeapRegion* RegionList::pop() {
  assert(well_formed(), "Inv");
  HeapRegion* res = hd();
  if (res != NULL) {
    _hd = get_next(res);
    _sz--;
    set_next(res, NULL);
    if (sz() == 0) _tl = NULL;
  }
  assert(well_formed(), "Inv");
  return res;
}
