src/share/vm/gc_implementation/g1/heapRegion.cpp

Mon, 21 Jul 2014 10:00:31 +0200

author
tschatzl
date
Mon, 21 Jul 2014 10:00:31 +0200
changeset 7009
3f2894c5052e
parent 6992
2c6ef90f030a
child 7050
6701abbc4441
permissions
-rw-r--r--

8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: Refactor preparation for compaction during Full GC so that it lazily initializes the first compaction point. This also avoids problems later when the first region may not be committed. Also reviewed by K. Barrett.
Reviewed-by: brutisso

     1 /*
     2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/nmethod.hpp"
    27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    29 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    30 #include "gc_implementation/g1/heapRegion.inline.hpp"
    31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    33 #include "gc_implementation/shared/liveRange.hpp"
    34 #include "memory/genOopClosures.inline.hpp"
    35 #include "memory/iterator.hpp"
    36 #include "memory/space.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/orderAccess.inline.hpp"
    40 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Region sizing globals. All are zero until computed exactly once by
// HeapRegion::setup_heap_region_size() during heap initialization (the
// guarantees there enforce the set-once discipline).
int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;
// Dirty-card-to-oop closure specialized for a single heap region hr.
// cl is applied to the oops found on dirty cards; fk selects which
// additional filtering walk_mem_region() will apply per oop.
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, ExtendedOopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1) { }
// Filtering closure that forwards to oc only for oops pointing outside
// region r. The region's bounds are cached here so the per-oop test is
// just two pointer comparisons.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
    59 template<class ClosureType>
    60 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
    61                                HeapRegion* hr,
    62                                HeapWord* cur, HeapWord* top) {
    63   oop cur_oop = oop(cur);
    64   size_t oop_size = hr->block_size(cur);
    65   HeapWord* next_obj = cur + oop_size;
    66   while (next_obj < top) {
    67     // Keep filtering the remembered set.
    68     if (!g1h->is_obj_dead(cur_oop, hr)) {
    69       // Bottom lies entirely below top, so we can call the
    70       // non-memRegion version of oop_iterate below.
    71       cur_oop->oop_iterate(cl);
    72     }
    73     cur = next_obj;
    74     cur_oop = oop(cur);
    75     oop_size = hr->block_size(cur);
    76     next_obj = cur + oop_size;
    77   }
    78   return cur;
    79 }
// Apply the (possibly filtered) closure to all live objects that
// intersect [bottom, top). The first and last objects may extend past
// the card's MemRegion mr, so they are iterated with the mr-clipping
// overload of oop_iterate; everything in between is handled by
// walk_mem_region_loop.
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  ExtendedOopClosure* cl2 = NULL;

  // Stack-allocate both candidate filters; cl2 ends up pointing at the
  // one selected by _fk (or at the unfiltered closure).
  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);

  switch (_fk) {
  case NoFilterKind:          cl2 = _cl; break;
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object. The first object may start below
  // bottom's card, hence the mr-clipped iteration.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = _hr->block_size(bottom);
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // Passing a statically typed closure to the template lets the
    // compiler inline the per-oop filter test.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, _cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, _cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too. It may
    // extend beyond top, hence the mr-clipped iteration.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
// Region sizing bounds used by setup_heap_region_size() below.
// All values are in bytes.

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048
// The largest region size G1 will ever use, in bytes.
size_t HeapRegion::max_region_size() {
  return (size_t)MAX_REGION_SIZE;
}
// Compute and publish the global region sizing constants (GrainBytes,
// GrainWords, CardsPerRegion and their logs) from either the
// G1HeapRegionSize flag or a heuristic based on the heap size. Must be
// called exactly once at VM startup; the guarantees below enforce that.
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // No explicit size requested: aim for roughly TARGET_REGION_NUMBER
    // regions based on the average of initial and max heap size.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
// Called after a full-GC compaction: reset the underlying contiguous
// space and invalidate all of this region's marking information.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
// Reset this (non-humongous) region to a clean, unused state.
//   par:         true when called in a parallel context; the remembered
//                set and claim value are then cleared later elsewhere.
//   clear_space: also clear/mangle the underlying space.
//   locked:      clear the remembered set under its lock.
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
   244 void HeapRegion::par_clear() {
   245   assert(used() == 0, "the region should have been already cleared");
   246   assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
   247   HeapRegionRemSet* hrrs = rem_set();
   248   hrrs->clear();
   249   CardTableModRefBS* ct_bs =
   250                    (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
   251   ct_bs->clear(MemRegion(bottom(), end()));
   252 }
// Compute this region's GC efficiency, used to order candidate regions
// for evacuation during mixed collections.
void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
// Tag this empty region as the first ("starts humongous") region of a
// humongous object whose last word is below new_top; new_end is the end
// of the last region in the humongous sequence.
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
// Tag this empty region as a "continues humongous" region belonging to
// the humongous object that starts in first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}
// Remove the humongous tagging from this region, restoring its original
// end (a "starts humongous" region's end was extended to cover the
// whole humongous object).
void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    // After restoring the original end, top may lie beyond it if the
    // humongous object spilled into follow-on regions.
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
// Try to claim this region by CAS-ing claimValue into _claimed. This
// makes a single attempt: if the region already carries claimValue, or
// the CAS loses a race (to any value), it returns false. Only the
// thread whose CAS succeeds gets true, so at most one claimant wins.
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
// Binary search for the start of the first block at or after addr,
// using block_start_careful (which presumably avoids updating the block
// offset table -- confirm against G1OffsetTableContigSpace).
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
   347 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
   348 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
   349 #endif // _MSC_VER
// Construct a heap region covering mr. All tagging fields start in
// their "free/untagged" state; hr_clear() below then establishes the
// initial marking state and remembered-set state.
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  // Remember the unextended end so humongous tagging can be undone.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_top_and_timestamp();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
// During full-GC compaction, compaction continues into the region the
// heap designates as next; may be NULL at the end of the heap.
CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}
// Prepare this region's marking info before removing self-forwarding
// pointers (evacuation-failure handling), so that the subsequent
// explicit marking of surviving objects is consistent with PTAMS/NTAMS.
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}
   408 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
   409                                                   bool during_conc_mark,
   410                                                   size_t marked_bytes) {
   411   assert(0 <= marked_bytes && marked_bytes <= used(),
   412          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
   413                  marked_bytes, used()));
   414   _prev_top_at_mark_start = top();
   415   _prev_marked_bytes = marked_bytes;
   416 }
// Apply cl to every live object intersecting mr, stopping at the first
// unparseable point (an object whose klass pointer is not yet
// published) or when cl aborts. Returns the address where iteration
// stopped, or NULL if the whole (clipped) range was processed or the
// clipped mr is empty.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                                 ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}
// Iterate, via cl, over the oops of all live objects intersecting the
// card region mr, being careful around unparseable objects and young
// regions. Returns NULL on complete success, or the address of the
// first unparseable point encountered (the caller then defers the
// card). When filter_young is true, young regions are skipped and the
// card (card_ptr) is cleaned before scanning.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // Walk forward from the block start to the object that actually
  // covers "start" (block_start may return an earlier block boundary).
  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");
  assert((cur + block_size(cur)) > start, "Loop postcondition");

  // First (possibly clipped) object.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  // Remaining objects whose start lies inside mr.
  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
   560 // Code roots support
   562 void HeapRegion::add_strong_code_root(nmethod* nm) {
   563   HeapRegionRemSet* hrrs = rem_set();
   564   hrrs->add_strong_code_root(nm);
   565 }
   567 void HeapRegion::remove_strong_code_root(nmethod* nm) {
   568   HeapRegionRemSet* hrrs = rem_set();
   569   hrrs->remove_strong_code_root(nm);
   570 }
   572 void HeapRegion::migrate_strong_code_roots() {
   573   assert(in_collection_set(), "only collection set regions");
   574   assert(!isHumongous(),
   575           err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
   576                   HR_FORMAT_PARAMS(this)));
   578   HeapRegionRemSet* hrrs = rem_set();
   579   hrrs->migrate_strong_code_roots();
   580 }
   582 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
   583   HeapRegionRemSet* hrrs = rem_set();
   584   hrrs->strong_code_roots_do(blk);
   585 }
   587 class VerifyStrongCodeRootOopClosure: public OopClosure {
   588   const HeapRegion* _hr;
   589   nmethod* _nm;
   590   bool _failures;
   591   bool _has_oops_in_region;
   593   template <class T> void do_oop_work(T* p) {
   594     T heap_oop = oopDesc::load_heap_oop(p);
   595     if (!oopDesc::is_null(heap_oop)) {
   596       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
   598       // Note: not all the oops embedded in the nmethod are in the
   599       // current region. We only look at those which are.
   600       if (_hr->is_in(obj)) {
   601         // Object is in the region. Check that its less than top
   602         if (_hr->top() <= (HeapWord*)obj) {
   603           // Object is above top
   604           gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
   605                                  "["PTR_FORMAT", "PTR_FORMAT") is above "
   606                                  "top "PTR_FORMAT,
   607                                  (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
   608           _failures = true;
   609           return;
   610         }
   611         // Nmethod has at least one oop in the current region
   612         _has_oops_in_region = true;
   613       }
   614     }
   615   }
   617 public:
   618   VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
   619     _hr(hr), _failures(false), _has_oops_in_region(false) {}
   621   void do_oop(narrowOop* p) { do_oop_work(p); }
   622   void do_oop(oop* p)       { do_oop_work(p); }
   624   bool failures()           { return _failures; }
   625   bool has_oops_in_region() { return _has_oops_in_region; }
   626 };
// Verification closure applied to each code blob on region _hr's strong
// code root list: checks the nmethod is alive and actually has at least
// one oop pointing into the region, delegating per-oop checks to
// VerifyStrongCodeRootOopClosure.
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures()       { return _failures; }
};
// Verify this region's strong code root list (guarded by the
// G1VerifyHeapRegionCodeRoots flag); sets *failures to true on any
// inconsistency. Never resets *failures to false.
void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // Continuation regions share the starts-humongous region's roots and
  // should carry none of their own.
  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}
// Print a one-line summary of this region to the GC log stream.
void HeapRegion::print() const { print_on(gclog_or_tty); }
   716 void HeapRegion::print_on(outputStream* st) const {
   717   if (isHumongous()) {
   718     if (startsHumongous())
   719       st->print(" HS");
   720     else
   721       st->print(" HC");
   722   } else {
   723     st->print("   ");
   724   }
   725   if (in_collection_set())
   726     st->print(" CS");
   727   else
   728     st->print("   ");
   729   if (is_young())
   730     st->print(is_survivor() ? " SU" : " Y ");
   731   else
   732     st->print("   ");
   733   if (is_empty())
   734     st->print(" F");
   735   else
   736     st->print("  ");
   737   st->print(" TS %5d", _gc_time_stamp);
   738   st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
   739             prev_top_at_mark_start(), next_top_at_mark_start());
   740   G1OffsetTableContigSpace::print_on(st);
   741 }
// Closure used during heap verification. For every reference field of a live
// object it checks that (1) the referent is inside the heap and alive
// according to the chosen marking information, and (2) where required, the
// remembered set of the referent's region contains an entry for the field's
// card. Failures are printed under ParGCRareEvent_lock and counted.
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;    // cached card table, used for the rem set checks
  oop _containing_obj;       // the object whose fields are being scanned
  bool _failures;            // set once any verification failure is seen
  int _n_failures;           // number of failing fields/objects observed
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    // Cache the card table so do_oop_work() can read card values directly.
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  // Must be called before iterating a new object's fields.
  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  // Print a description of obj: just the class name in product builds
  // (full oop printing is unavailable there), the full object otherwise.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // Verify the single reference field at p inside _containing_obj.
  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      // Check 1: the referent must be in the heap and not dead.
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        // Serialize failure output across concurrent verifier threads.
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      // Check 2: remembered set entry for a cross-region reference.
      // Skipped during full collection (unless G1VerifyRSetsDuringFullGC),
      // since full GC does not maintain remembered sets.
      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // A missing entry is acceptable when the source region is young
          // (always collected, no rem set needed), the entry is present, or
          // the update may still be pending: log buffers not flushed and the
          // relevant card (field card for obj arrays, else field or object
          // head card) is dirty and will be rescanned.
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                          cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            // Count the object once even if both checks failed for it.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
   881 // This really ought to be commoned up into OffsetTableContigSpace somehow.
   882 // We would need a mechanism to make that code skip dead objects.
   884 void HeapRegion::verify(VerifyOption vo,
   885                         bool* failures) const {
   886   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   887   *failures = false;
   888   HeapWord* p = bottom();
   889   HeapWord* prev_p = NULL;
   890   VerifyLiveClosure vl_cl(g1, vo);
   891   bool is_humongous = isHumongous();
   892   bool do_bot_verify = !is_young();
   893   size_t object_num = 0;
   894   while (p < top()) {
   895     oop obj = oop(p);
   896     size_t obj_size = block_size(p);
   897     object_num += 1;
   899     if (is_humongous != g1->isHumongous(obj_size) &&
   900         !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
   901       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
   902                              SIZE_FORMAT" words) in a %shumongous region",
   903                              p, g1->isHumongous(obj_size) ? "" : "non-",
   904                              obj_size, is_humongous ? "" : "non-");
   905        *failures = true;
   906        return;
   907     }
   909     // If it returns false, verify_for_object() will output the
   910     // appropriate messasge.
   911     if (do_bot_verify &&
   912         !g1->is_obj_dead(obj, this) &&
   913         !_offsets.verify_for_object(p, obj_size)) {
   914       *failures = true;
   915       return;
   916     }
   918     if (!g1->is_obj_dead_cond(obj, this, vo)) {
   919       if (obj->is_oop()) {
   920         Klass* klass = obj->klass();
   921         bool is_metaspace_object = Metaspace::contains(klass) ||
   922                                    (vo == VerifyOption_G1UsePrevMarking &&
   923                                    ClassLoaderDataGraph::unload_list_contains(klass));
   924         if (!is_metaspace_object) {
   925           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
   926                                  "not metadata", klass, (void *)obj);
   927           *failures = true;
   928           return;
   929         } else if (!klass->is_klass()) {
   930           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
   931                                  "not a klass", klass, (void *)obj);
   932           *failures = true;
   933           return;
   934         } else {
   935           vl_cl.set_containing_obj(obj);
   936           obj->oop_iterate_no_header(&vl_cl);
   937           if (vl_cl.failures()) {
   938             *failures = true;
   939           }
   940           if (G1MaxVerifyFailures >= 0 &&
   941               vl_cl.n_failures() >= G1MaxVerifyFailures) {
   942             return;
   943           }
   944         }
   945       } else {
   946         gclog_or_tty->print_cr(PTR_FORMAT" no an oop", (void *)obj);
   947         *failures = true;
   948         return;
   949       }
   950     }
   951     prev_p = p;
   952     p += obj_size;
   953   }
   955   if (p != top()) {
   956     gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
   957                            "does not match top "PTR_FORMAT, p, top());
   958     *failures = true;
   959     return;
   960   }
   962   HeapWord* the_end = end();
   963   assert(p == top(), "it should still hold");
   964   // Do some extra BOT consistency checking for addresses in the
   965   // range [top, end). BOT look-ups in this range should yield
   966   // top. No point in doing that if top == end (there's nothing there).
   967   if (p < the_end) {
   968     // Look up top
   969     HeapWord* addr_1 = p;
   970     HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
   971     if (b_start_1 != p) {
   972       gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
   973                              " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
   974                              addr_1, b_start_1, p);
   975       *failures = true;
   976       return;
   977     }
   979     // Look up top + 1
   980     HeapWord* addr_2 = p + 1;
   981     if (addr_2 < the_end) {
   982       HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
   983       if (b_start_2 != p) {
   984         gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
   985                                " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
   986                                addr_2, b_start_2, p);
   987         *failures = true;
   988         return;
   989       }
   990     }
   992     // Look up an address between top and end
   993     size_t diff = pointer_delta(the_end, p) / 2;
   994     HeapWord* addr_3 = p + diff;
   995     if (addr_3 < the_end) {
   996       HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
   997       if (b_start_3 != p) {
   998         gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
   999                                " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
  1000                                addr_3, b_start_3, p);
  1001         *failures = true;
  1002         return;
  1006     // Loook up end - 1
  1007     HeapWord* addr_4 = the_end - 1;
  1008     HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
  1009     if (b_start_4 != p) {
  1010       gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
  1011                              " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
  1012                              addr_4, b_start_4, p);
  1013       *failures = true;
  1014       return;
  1018   if (is_humongous && object_num > 1) {
  1019     gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
  1020                            "but has "SIZE_FORMAT", objects",
  1021                            bottom(), end(), object_num);
  1022     *failures = true;
  1023     return;
  1026   verify_strong_code_roots(vo, failures);
  1029 void HeapRegion::verify() const {
  1030   bool dummy = false;
  1031   verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
  1034 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
  1035 // away eventually.
  1037 void G1OffsetTableContigSpace::clear(bool mangle_space) {
  1038   set_top(bottom());
  1039   set_saved_mark_word(bottom());
  1040   CompactibleSpace::clear(mangle_space);
  1041   _offsets.zero_bottom_entry();
  1042   _offsets.initialize_threshold();
  1045 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  1046   Space::set_bottom(new_bottom);
  1047   _offsets.set_bottom(new_bottom);
  1050 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  1051   Space::set_end(new_end);
  1052   _offsets.resize(new_end - bottom());
  1055 void G1OffsetTableContigSpace::print() const {
  1056   print_short();
  1057   gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
  1058                 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  1059                 bottom(), top(), _offsets.threshold(), end());
  1062 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  1063   return _offsets.initialize_threshold();
  1066 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
  1067                                                     HeapWord* end) {
  1068   _offsets.alloc_block(start, end);
  1069   return _offsets.threshold();
  1072 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  1073   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1074   assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  1075   if (_gc_time_stamp < g1h->get_gc_time_stamp())
  1076     return top();
  1077   else
  1078     return Space::saved_mark_word();
  1081 void G1OffsetTableContigSpace::record_top_and_timestamp() {
  1082   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1083   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
  1085   if (_gc_time_stamp < curr_gc_time_stamp) {
  1086     // The order of these is important, as another thread might be
  1087     // about to start scanning this region. If it does so after
  1088     // set_saved_mark and before _gc_time_stamp = ..., then the latter
  1089     // will be false, and it will pick up top() as the high water mark
  1090     // of region. If it does so after _gc_time_stamp = ..., then it
  1091     // will pick up the right saved_mark_word() as the high water mark
  1092     // of the region. Either way, the behaviour will be correct.
  1093     Space::set_saved_mark_word(top());
  1094     OrderAccess::storestore();
  1095     _gc_time_stamp = curr_gc_time_stamp;
  1096     // No need to do another barrier to flush the writes above. If
  1097     // this is called in parallel with other threads trying to
  1098     // allocate into the region, the caller should call this while
  1099     // holding a lock and when the lock is released the writes will be
  1100     // flushed.
  1104 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  1105   object_iterate(blk);
  1108 void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  1109   HeapWord* p = bottom();
  1110   while (p < top()) {
  1111     if (block_is_obj(p)) {
  1112       blk->do_object(oop(p));
  1114     p += block_size(p);
  1118 #define block_is_always_obj(q) true
  1119 void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  1120   SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
  1122 #undef block_is_always_obj
  1124 G1OffsetTableContigSpace::
  1125 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
  1126                          MemRegion mr) :
  1127   _offsets(sharedOffsetArray, mr),
  1128   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  1129   _gc_time_stamp(0)
  1131   _offsets.set_space(this);
  1132   // false ==> we'll do the clearing if there's clearing to be done.
  1133   CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
  1134   _top = bottom();
  1135   _offsets.zero_bottom_entry();
  1136   _offsets.initialize_threshold();

mercurial