src/share/vm/gc_implementation/g1/heapRegion.cpp

author      johnc
date        Sat, 06 Oct 2012 01:17:44 -0700
changeset   4173:8a5ea0a9ccc4
parent      4065:8fbf05030e24
child       5205:3a4805ad0005
permissions -rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters, etc., to unsigned, and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>
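
As an illustration of the change the summary describes, a minimal before/after sketch (hypothetical declarations, not lines from the actual patch):

    // Before: task/worker numbers were passed around as signed ints.
    void scan_root_region(HeapRegion* hr, int worker_i);   // hypothetical
    // After: unsigned, and renamed consistently with the other collectors.
    void scan_root_region(HeapRegion* hr, uint worker_id); // hypothetical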

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                          cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
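
// Walk the objects in [cur, top), applying cl to the fields of each object
// that is not dead (per is_obj_dead()), so that stale references in dead
// objects do not re-populate the remembered set. Returns the address of the
// last object, whose end may be at or beyond top; the caller handles that
// object separately with a MemRegion-bounded iteration.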
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048
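
// For example, with the default flags and -Xms8g the calculation below
// gives 8g / 2048 = 4m, which is already a power of 2 and within the
// [1m, 32m] bounds, so regions are 4 MB; any -Xms at or below 2g bottoms
// out at the 1 MB minimum.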
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}

void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
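
// Make a single attempt to claim this region by atomically installing
// claimValue in _claimed. Returns true only if this thread's CAS succeeded;
// returns false if the region already held claimValue or if another thread
// changed _claimed concurrently.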
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}

void HeapRegion::save_marks() {
  set_saved_mark();
}
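
// Apply cl to the fields of every object in mr. The region must start and
// end on object boundaries, as checked by the assert below.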
void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)

void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop, we have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = obj->size();
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        if (!klass->is_metadata()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
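
// If this space's GC time stamp is older than the heap's current one, a
// collection has started since the mark was saved, so the saved mark is
// stale and top() is the true high water mark; otherwise return the mark
// saved by ContiguousSpace.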
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the time
    // stamp comparison will fail, and it will pick up top() as the
    // high water mark of the region. If it does so after
    // _gc_time_stamp = ..., then it will pick up the right
    // saved_mark_word() as the high water mark of the region. Either
    // way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // No need to do another barrier to flush the writes above. If
    // this is called in parallel with other threads trying to
    // allocate into the region, the caller should call this while
    // holding a lock and when the lock is released the writes will be
    // flushed.
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
