changeset   1248:f9c95d5dc41f
parent      1240:8754a3c37762
parent      1247:85d0690f7d12
child       1249:32c83fb84370
author      trims
date        Thu, 25 Jun 2009 22:01:08 -0700
summary     Merge

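This merge combines G1 garbage-collector and runtime changes. On the G1 side, heap verification is parameterized by which marking information to consult: a use_prev_marking flag is threaded through G1CollectedHeap::verify, HeapRegion::verify, and the verification closures, so that the end of remark can validate the freshly built "next" marking while all other callers keep using the always-consistent "prev" marking. The remembered-set code replaces raw short indices with RegionIdx_t and CardIdx_t typedefs and adds guarantees that region and card indices fit in those types (6843694). On the runtime side, a SkipGCALot stack object prevents re-entrant gc-a-lot attempts, ParMarkBitMap converts between bits and words with shifts instead of a multiply and a divide, and BitMap's index verification moves into bitMap.inline.hpp behind a new NOT_DEBUG_RETURN macro, with the include database updated to match.
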
     1.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jun 25 12:09:48 2009 -0700
     1.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jun 25 22:01:08 2009 -0700
     1.3 @@ -1157,6 +1157,13 @@
     1.4    } else {
     1.5      // We're done with marking.
     1.6      JavaThread::satb_mark_queue_set().set_active_all_threads(false);
     1.7 +
     1.8 +    if (VerifyDuringGC) {
     1.9 +      g1h->prepare_for_verify();
    1.10 +      g1h->verify(/* allow_dirty */      true,
    1.11 +                  /* silent */           false,
    1.12 +                  /* use_prev_marking */ false);
    1.13 +    }
    1.14    }
    1.15  
    1.16  #if VERIFY_OBJS_PROCESSED
    1.17 @@ -1747,12 +1754,12 @@
    1.18    // races with it goes around and waits for completeCleanup to finish.
    1.19    g1h->increment_total_collections();
    1.20  
    1.21 -#ifndef PRODUCT
    1.22    if (VerifyDuringGC) {
    1.23 -    G1CollectedHeap::heap()->prepare_for_verify();
    1.24 -    G1CollectedHeap::heap()->verify(true,false);
    1.25 +    g1h->prepare_for_verify();
    1.26 +    g1h->verify(/* allow_dirty */      true,
    1.27 +                /* silent */           false,
    1.28 +                /* use_prev_marking */ true);
    1.29    }
    1.30 -#endif
    1.31  }
    1.32  
    1.33  void ConcurrentMark::completeCleanup() {
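
The block added at the end of remark above is, per the note later in this changeset, the one caller that verifies with use_prev_marking == false, i.e. against the freshly completed "next" marking; the cleanup-time call keeps using the always-consistent "prev" marking. A toy standalone model of that dispatch (HeapModel and its bool fields are hypothetical stand-ins; the real predicate is the is_obj_dead_cond helper added to g1CollectedHeap.hpp in this change, and it also accounts for objects allocated since the mark):

    #include <cstdio>

    // Toy model: G1 keeps two marking bitmaps. "prev" reflects the last
    // completed marking and is always consistent; "next" is the one being
    // built and is only trustworthy right after remark finishes.
    struct HeapModel {
      bool marked_prev;   // stand-in for the "prev" bitmap entry
      bool marked_next;   // stand-in for the "next" bitmap entry
      bool is_obj_dead() const { return !marked_prev; }  // "prev" information
      bool is_obj_ill()  const { return !marked_next; }  // "next" information
      bool is_obj_dead_cond(bool use_prev_marking) const {
        return use_prev_marking ? is_obj_dead() : is_obj_ill();
      }
    };

    int main() {
      HeapModel obj = { true, false };  // survived last cycle, not re-marked yet
      printf("prev view says dead: %d\n", obj.is_obj_dead_cond(true));   // 0
      printf("next view says dead: %d\n", obj.is_obj_dead_cond(false));  // 1
      return 0;
    }
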
     2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 25 12:09:48 2009 -0700
     2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 25 22:01:08 2009 -0700
     2.3 @@ -1535,6 +1535,15 @@
     2.4    guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
     2.5    guarantee(_cur_alloc_region == NULL, "from constructor");
     2.6  
     2.7 +  // 6843694 - ensure that the maximum region index can fit
     2.8 +  // in the remembered set structures.
     2.9 +  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
    2.10 +  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
    2.11 +
    2.12 +  const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
    2.13 +  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
    2.14 +  guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
    2.15 +
    2.16    _bot_shared = new G1BlockOffsetSharedArray(_reserved,
    2.17                                               heap_word_size(init_byte_size));
    2.18  
    2.19 @@ -2127,17 +2136,22 @@
    2.20  };
    2.21  
    2.22  class VerifyObjsInRegionClosure: public ObjectClosure {
    2.23 +private:
    2.24    G1CollectedHeap* _g1h;
    2.25    size_t _live_bytes;
    2.26    HeapRegion *_hr;
    2.27 +  bool _use_prev_marking;
    2.28  public:
    2.29 -  VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
    2.30 +  // use_prev_marking == true  -> use "prev" marking information,
    2.31 +  // use_prev_marking == false -> use "next" marking information
    2.32 +  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
    2.33 +    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
    2.34      _g1h = G1CollectedHeap::heap();
    2.35    }
    2.36    void do_object(oop o) {
    2.37      VerifyLivenessOopClosure isLive(_g1h);
    2.38      assert(o != NULL, "Huh?");
    2.39 -    if (!_g1h->is_obj_dead(o)) {
    2.40 +    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
    2.41        o->oop_iterate(&isLive);
    2.42        if (!_hr->obj_allocated_since_prev_marking(o))
    2.43          _live_bytes += (o->size() * HeapWordSize);
    2.44 @@ -2176,17 +2190,22 @@
    2.45  };
    2.46  
    2.47  class VerifyRegionClosure: public HeapRegionClosure {
    2.48 -public:
    2.49 +private:
    2.50    bool _allow_dirty;
    2.51    bool _par;
    2.52 -  VerifyRegionClosure(bool allow_dirty, bool par = false)
    2.53 -    : _allow_dirty(allow_dirty), _par(par) {}
    2.54 +  bool _use_prev_marking;
    2.55 +public:
    2.56 +  // use_prev_marking == true  -> use "prev" marking information,
    2.57 +  // use_prev_marking == false -> use "next" marking information
    2.58 +  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
    2.59 +    : _allow_dirty(allow_dirty), _par(par),
    2.60 +      _use_prev_marking(use_prev_marking) {}
    2.61    bool doHeapRegion(HeapRegion* r) {
    2.62      guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
    2.63                "Should be unclaimed at verify points.");
    2.64      if (!r->continuesHumongous()) {
    2.65 -      VerifyObjsInRegionClosure not_dead_yet_cl(r);
    2.66 -      r->verify(_allow_dirty);
    2.67 +      VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
    2.68 +      r->verify(_allow_dirty, _use_prev_marking);
    2.69        r->object_iterate(&not_dead_yet_cl);
    2.70        guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
    2.71                  "More live objects than counted in last complete marking.");
    2.72 @@ -2199,10 +2218,13 @@
    2.73  private:
    2.74    G1CollectedHeap* _g1h;
    2.75    bool             _failures;
    2.76 -
    2.77 +  bool             _use_prev_marking;
    2.78  public:
    2.79 -  VerifyRootsClosure() :
    2.80 -    _g1h(G1CollectedHeap::heap()), _failures(false) { }
    2.81 +  // use_prev_marking == true  -> use "prev" marking information,
    2.82 +  // use_prev_marking == false -> use "next" marking information
    2.83 +  VerifyRootsClosure(bool use_prev_marking) :
    2.84 +    _g1h(G1CollectedHeap::heap()), _failures(false),
    2.85 +    _use_prev_marking(use_prev_marking) { }
    2.86  
    2.87    bool failures() { return _failures; }
    2.88  
    2.89 @@ -2213,7 +2235,7 @@
    2.90    void do_oop(oop* p) {
    2.91      oop obj = *p;
    2.92      if (obj != NULL) {
    2.93 -      if (_g1h->is_obj_dead(obj)) {
    2.94 +      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
    2.95          gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
    2.96                                 "points to dead obj "PTR_FORMAT, p, (void*) obj);
    2.97          obj->print_on(gclog_or_tty);
    2.98 @@ -2229,24 +2251,35 @@
    2.99  private:
   2.100    G1CollectedHeap* _g1h;
   2.101    bool _allow_dirty;
   2.102 +  bool _use_prev_marking;
   2.103  
   2.104  public:
   2.105 -  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
   2.106 +  // use_prev_marking == true  -> use "prev" marking information,
   2.107 +  // use_prev_marking == false -> use "next" marking information
   2.108 +  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
   2.109 +                  bool use_prev_marking) :
   2.110      AbstractGangTask("Parallel verify task"),
   2.111 -    _g1h(g1h), _allow_dirty(allow_dirty) { }
   2.112 +    _g1h(g1h), _allow_dirty(allow_dirty),
   2.113 +    _use_prev_marking(use_prev_marking) { }
   2.114  
   2.115    void work(int worker_i) {
   2.116      HandleMark hm;
   2.117 -    VerifyRegionClosure blk(_allow_dirty, true);
   2.118 +    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
   2.119      _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
   2.120                                            HeapRegion::ParVerifyClaimValue);
   2.121    }
   2.122  };
   2.123  
   2.124  void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
   2.125 +  verify(allow_dirty, silent, /* use_prev_marking */ true);
   2.126 +}
   2.127 +
   2.128 +void G1CollectedHeap::verify(bool allow_dirty,
   2.129 +                             bool silent,
   2.130 +                             bool use_prev_marking) {
   2.131    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
   2.132      if (!silent) { gclog_or_tty->print("roots "); }
   2.133 -    VerifyRootsClosure rootsCl;
   2.134 +    VerifyRootsClosure rootsCl(use_prev_marking);
   2.135      process_strong_roots(false,
   2.136                           SharedHeap::SO_AllClasses,
   2.137                           &rootsCl,
   2.138 @@ -2257,7 +2290,7 @@
   2.139        assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
   2.140               "sanity check");
   2.141  
   2.142 -      G1ParVerifyTask task(this, allow_dirty);
   2.143 +      G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
   2.144        int n_workers = workers()->total_workers();
   2.145        set_par_threads(n_workers);
   2.146        workers()->run_task(&task);
   2.147 @@ -2271,7 +2304,7 @@
   2.148        assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
   2.149               "sanity check");
   2.150      } else {
   2.151 -      VerifyRegionClosure blk(allow_dirty);
   2.152 +      VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
   2.153        _hrs->iterate(&blk);
   2.154      }
   2.155      if (!silent) gclog_or_tty->print("remset ");
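
The guarantees added to the G1CollectedHeap constructor above are plain arithmetic over the RegionIdx_t/CardIdx_t typedefs introduced in g1CollectedHeap.hpp below. A self-contained sketch of the same computation; the 1 MB region size and 512-byte card size are assumptions for illustration, not values taken from this changeset:

    #include <cstdio>
    #include <cstddef>

    typedef int RegionIdx_t;
    typedef int CardIdx_t;
    const int BitsPerByte = 8;

    int main() {
      // Largest value a signed 32-bit index can hold: 2^31 - 1.
      const size_t max_region_idx =
          ((size_t)1 << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;
      // Assumed values for illustration: 1 MB regions, 512-byte cards.
      const size_t grain_bytes = 1024 * 1024;
      const int card_shift = 9;                                   // log2(512)
      const size_t cards_per_region = grain_bytes >> card_shift;  // 2048
      printf("max index = %zu, cards per region = %zu\n",
             max_region_idx, cards_per_region);   // 2147483647, 2048
      return 0;
    }
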
     3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 25 12:09:48 2009 -0700
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 25 22:01:08 2009 -0700
     3.3 @@ -59,6 +59,9 @@
     3.4  typedef GenericTaskQueue<oop*>    RefToScanQueue;
     3.5  typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
     3.6  
     3.7 +typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
     3.8 +typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
     3.9 +
    3.10  enum G1GCThreadGroups {
    3.11    G1CRGroup = 0,
    3.12    G1ZFGroup = 1,
    3.13 @@ -1046,6 +1049,17 @@
    3.14    virtual void prepare_for_verify();
    3.15  
    3.16    // Perform verification.
    3.17 +
    3.18 +  // use_prev_marking == true  -> use "prev" marking information,
    3.19 +  // use_prev_marking == false -> use "next" marking information
    3.20 +  // NOTE: Only the "prev" marking information is guaranteed to be
    3.21 +  // consistent most of the time, so most calls to this should use
    3.22 +  // use_prev_marking == true. Currently, there is only one case where
    3.23 +  // this is called with use_prev_marking == false, which is to verify
    3.24 +  // the "next" marking information at the end of remark.
    3.25 +  void verify(bool allow_dirty, bool silent, bool use_prev_marking);
    3.26 +
    3.27 +  // Override; it uses the "prev" marking information
    3.28    virtual void verify(bool allow_dirty, bool silent);
    3.29    virtual void print() const;
    3.30    virtual void print_on(outputStream* st) const;
    3.31 @@ -1122,6 +1136,18 @@
    3.32    bool isMarkedPrev(oop obj) const;
    3.33    bool isMarkedNext(oop obj) const;
    3.34  
    3.35 +  // use_prev_marking == true  -> use "prev" marking information,
    3.36 +  // use_prev_marking == false -> use "next" marking information
    3.37 +  bool is_obj_dead_cond(const oop obj,
    3.38 +                        const HeapRegion* hr,
    3.39 +                        const bool use_prev_marking) const {
    3.40 +    if (use_prev_marking) {
    3.41 +      return is_obj_dead(obj, hr);
    3.42 +    } else {
    3.43 +      return is_obj_ill(obj, hr);
    3.44 +    }
    3.45 +  }
    3.46 +
    3.47    // Determine if an object is dead, given the object and also
    3.48    // the region to which the object belongs. An object is dead
    3.49    // iff a) it was not allocated since the last mark and b) it
    3.50 @@ -1159,8 +1185,19 @@
    3.51    // Added if it is in permanent gen it isn't dead.
    3.52    // Added if it is NULL it isn't dead.
    3.53  
    3.54 -  bool is_obj_dead(oop obj) {
    3.55 -    HeapRegion* hr = heap_region_containing(obj);
    3.56 +  // use_prev_marking == true  -> use "prev" marking information,
    3.57 +  // use_prev_marking == false -> use "next" marking information
    3.58 +  bool is_obj_dead_cond(const oop obj,
    3.59 +                        const bool use_prev_marking) {
    3.60 +    if (use_prev_marking) {
    3.61 +      return is_obj_dead(obj);
    3.62 +    } else {
    3.63 +      return is_obj_ill(obj);
    3.64 +    }
    3.65 +  }
    3.66 +
    3.67 +  bool is_obj_dead(const oop obj) {
    3.68 +    const HeapRegion* hr = heap_region_containing(obj);
    3.69      if (hr == NULL) {
    3.70        if (Universe::heap()->is_in_permanent(obj))
    3.71          return false;
    3.72 @@ -1170,8 +1207,8 @@
    3.73      else return is_obj_dead(obj, hr);
    3.74    }
    3.75  
    3.76 -  bool is_obj_ill(oop obj) {
    3.77 -    HeapRegion* hr = heap_region_containing(obj);
    3.78 +  bool is_obj_ill(const oop obj) {
    3.79 +    const HeapRegion* hr = heap_region_containing(obj);
    3.80      if (hr == NULL) {
    3.81        if (Universe::heap()->is_in_permanent(obj))
    3.82          return false;
     4.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jun 25 12:09:48 2009 -0700
     4.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jun 25 22:01:08 2009 -0700
     4.3 @@ -40,15 +40,19 @@
     4.4  {}
     4.5  
     4.6  class VerifyLiveClosure: public OopClosure {
     4.7 +private:
     4.8    G1CollectedHeap* _g1h;
     4.9    CardTableModRefBS* _bs;
    4.10    oop _containing_obj;
    4.11    bool _failures;
    4.12    int _n_failures;
    4.13 +  bool _use_prev_marking;
    4.14  public:
    4.15 -  VerifyLiveClosure(G1CollectedHeap* g1h) :
    4.16 +  // use_prev_marking == true  -> use "prev" marking information,
    4.17 +  // use_prev_marking == false -> use "next" marking information
    4.18 +  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    4.19      _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    4.20 -    _failures(false), _n_failures(0)
    4.21 +    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
    4.22    {
    4.23      BarrierSet* bs = _g1h->barrier_set();
    4.24      if (bs->is_a(BarrierSet::CardTableModRef))
    4.25 @@ -68,11 +72,13 @@
    4.26  
    4.27    void do_oop(oop* p) {
    4.28      assert(_containing_obj != NULL, "Precondition");
    4.29 -    assert(!_g1h->is_obj_dead(_containing_obj), "Precondition");
    4.30 +    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
    4.31 +           "Precondition");
    4.32      oop obj = *p;
    4.33      if (obj != NULL) {
    4.34        bool failed = false;
    4.35 -      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead(obj)) {
    4.36 +      if (!_g1h->is_in_closed_subset(obj) ||
    4.37 +          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
    4.38          if (!_failures) {
    4.39            gclog_or_tty->print_cr("");
    4.40            gclog_or_tty->print_cr("----------");
    4.41 @@ -647,19 +653,23 @@
    4.42    G1OffsetTableContigSpace::print_on(st);
    4.43  }
    4.44  
    4.45 +void HeapRegion::verify(bool allow_dirty) const {
    4.46 +  verify(allow_dirty, /* use_prev_marking */ true);
    4.47 +}
    4.48 +
    4.49  #define OBJ_SAMPLE_INTERVAL 0
    4.50  #define BLOCK_SAMPLE_INTERVAL 100
    4.51  
    4.52  // This really ought to be commoned up into OffsetTableContigSpace somehow.
    4.53  // We would need a mechanism to make that code skip dead objects.
    4.54  
    4.55 -void HeapRegion::verify(bool allow_dirty) const {
    4.56 +void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
    4.57    G1CollectedHeap* g1 = G1CollectedHeap::heap();
    4.58    HeapWord* p = bottom();
    4.59    HeapWord* prev_p = NULL;
    4.60    int objs = 0;
    4.61    int blocks = 0;
    4.62 -  VerifyLiveClosure vl_cl(g1);
    4.63 +  VerifyLiveClosure vl_cl(g1, use_prev_marking);
    4.64    while (p < top()) {
    4.65      size_t size = oop(p)->size();
    4.66      if (blocks == BLOCK_SAMPLE_INTERVAL) {
    4.67 @@ -671,7 +681,7 @@
    4.68      }
    4.69      if (objs == OBJ_SAMPLE_INTERVAL) {
    4.70        oop obj = oop(p);
    4.71 -      if (!g1->is_obj_dead(obj, this)) {
    4.72 +      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
    4.73          obj->verify();
    4.74          vl_cl.set_containing_obj(obj);
    4.75          obj->oop_iterate(&vl_cl);
     5.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jun 25 12:09:48 2009 -0700
     5.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jun 25 22:01:08 2009 -0700
     5.3 @@ -782,7 +782,16 @@
     5.4    void print() const;
     5.5    void print_on(outputStream* st) const;
     5.6  
     5.7 -  // Override
     5.8 +  // use_prev_marking == true  -> use "prev" marking information,
     5.9 +  // use_prev_marking == false -> use "next" marking information
    5.10 +  // NOTE: Only the "prev" marking information is guaranteed to be
    5.11 +  // consistent most of the time, so most calls to this should use
    5.12 +  // use_prev_marking == true. Currently, there is only one case where
    5.13 +  // this is called with use_prev_marking == false, which is to verify
    5.14 +  // the "next" marking information at the end of remark.
    5.15 +  void verify(bool allow_dirty, bool use_prev_marking) const;
    5.16 +
    5.17 +  // Override; it uses the "prev" marking information
    5.18    virtual void verify(bool allow_dirty) const;
    5.19  
    5.20  #ifdef DEBUG
     6.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jun 25 12:09:48 2009 -0700
     6.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jun 25 22:01:08 2009 -0700
     6.3 @@ -109,7 +109,7 @@
     6.4      return new PerRegionTable(hr);
     6.5    }
     6.6  
     6.7 -  void add_card_work(short from_card, bool par) {
     6.8 +  void add_card_work(CardIdx_t from_card, bool par) {
     6.9      if (!_bm.at(from_card)) {
    6.10        if (par) {
    6.11          if (_bm.par_at_put(from_card, 1)) {
    6.12 @@ -141,11 +141,11 @@
    6.13      // and adding a bit to the new table is never incorrect.
    6.14      if (loc_hr->is_in_reserved(from)) {
    6.15        size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
    6.16 -      size_t from_card =
    6.17 -        hw_offset >>
    6.18 -        (CardTableModRefBS::card_shift - LogHeapWordSize);
    6.19 +      CardIdx_t from_card = (CardIdx_t)
    6.20 +          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
    6.21  
    6.22 -      add_card_work((short) from_card, par);
    6.23 +      assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
    6.24 +      add_card_work(from_card, par);
    6.25      }
    6.26    }
    6.27  
    6.28 @@ -190,11 +190,11 @@
    6.29  #endif
    6.30    }
    6.31  
    6.32 -  void add_card(short from_card_index) {
    6.33 +  void add_card(CardIdx_t from_card_index) {
    6.34      add_card_work(from_card_index, /*parallel*/ true);
    6.35    }
    6.36  
    6.37 -  void seq_add_card(short from_card_index) {
    6.38 +  void seq_add_card(CardIdx_t from_card_index) {
    6.39      add_card_work(from_card_index, /*parallel*/ false);
    6.40    }
    6.41  
    6.42 @@ -604,7 +604,7 @@
    6.43  
    6.44    // Note that this may be a continued H region.
    6.45    HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
    6.46 -  size_t from_hrs_ind = (size_t)from_hr->hrs_index();
    6.47 +  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
    6.48  
    6.49    // If the region is already coarsened, return.
    6.50    if (_coarse_map.at(from_hrs_ind)) {
    6.51 @@ -627,11 +627,11 @@
    6.52        uintptr_t from_hr_bot_card_index =
    6.53          uintptr_t(from_hr->bottom())
    6.54            >> CardTableModRefBS::card_shift;
    6.55 -      int card_index = from_card - from_hr_bot_card_index;
    6.56 +      CardIdx_t card_index = from_card - from_hr_bot_card_index;
    6.57        assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
    6.58               "Must be in range.");
    6.59        if (G1HRRSUseSparseTable &&
    6.60 -          _sparse_table.add_card((short) from_hrs_ind, card_index)) {
    6.61 +          _sparse_table.add_card(from_hrs_ind, card_index)) {
    6.62          if (G1RecordHRRSOops) {
    6.63            HeapRegionRemSet::record(hr(), from);
    6.64  #if HRRS_VERBOSE
    6.65 @@ -656,9 +656,9 @@
    6.66        }
    6.67  
    6.68        // Otherwise, transfer from sparse to fine-grain.
    6.69 -      short cards[SparsePRTEntry::CardsPerEntry];
    6.70 +      CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
    6.71        if (G1HRRSUseSparseTable) {
    6.72 -        bool res = _sparse_table.get_cards((short) from_hrs_ind, &cards[0]);
    6.73 +        bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
    6.74          assert(res, "There should have been an entry");
    6.75        }
    6.76  
    6.77 @@ -679,13 +679,13 @@
    6.78        // Add in the cards from the sparse table.
    6.79        if (G1HRRSUseSparseTable) {
    6.80          for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
    6.81 -          short c = cards[i];
    6.82 +          CardIdx_t c = cards[i];
    6.83            if (c != SparsePRTEntry::NullEntry) {
    6.84              prt->add_card(c);
    6.85            }
    6.86          }
    6.87          // Now we can delete the sparse entry.
    6.88 -        bool res = _sparse_table.delete_entry((short) from_hrs_ind);
    6.89 +        bool res = _sparse_table.delete_entry(from_hrs_ind);
    6.90          assert(res, "It should have been there.");
    6.91        }
    6.92      }
    6.93 @@ -1030,7 +1030,7 @@
    6.94  bool OtherRegionsTable::contains_reference_locked(oop* from) const {
    6.95    HeapRegion* hr = _g1h->heap_region_containing_raw(from);
    6.96    if (hr == NULL) return false;
    6.97 -  size_t hr_ind = hr->hrs_index();
    6.98 +  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
    6.99    // Is this region in the coarse map?
   6.100    if (_coarse_map.at(hr_ind)) return true;
   6.101  
   6.102 @@ -1045,8 +1045,9 @@
   6.103      uintptr_t hr_bot_card_index =
   6.104        uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
   6.105      assert(from_card >= hr_bot_card_index, "Inv");
   6.106 -    int card_index = from_card - hr_bot_card_index;
   6.107 -    return _sparse_table.contains_card((short)hr_ind, card_index);
   6.108 +    CardIdx_t card_index = from_card - hr_bot_card_index;
   6.109 +    assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
   6.110 +    return _sparse_table.contains_card(hr_ind, card_index);
   6.111    }
   6.112  
   6.113  
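The motivation for replacing short with RegionIdx_t/CardIdx_t throughout this file is 6843694, cited in the constructor change above: with enough regions, a region index overflows a 16-bit short, and the old (short) casts truncated silently. A tiny demonstration, assuming a 16-bit two's-complement short:

    #include <cstdio>
    #include <cstddef>

    int main() {
      // A large heap with small regions can easily exceed 32767 regions.
      size_t region_index = 40000;
      short truncated = (short) region_index;   // the old cast
      int   widened   = (int)   region_index;   // the new RegionIdx_t
      printf("short: %d, int: %d\n", truncated, widened);  // -25536 vs 40000
      return 0;
    }
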
     7.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Jun 25 12:09:48 2009 -0700
     7.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Jun 25 22:01:08 2009 -0700
     7.3 @@ -33,7 +33,7 @@
     7.4      sprt_iter->init(this);
     7.5  }
     7.6  
     7.7 -void SparsePRTEntry::init(short region_ind) {
     7.8 +void SparsePRTEntry::init(RegionIdx_t region_ind) {
     7.9    _region_ind = region_ind;
    7.10    _next_index = NullEntry;
    7.11  #if UNROLL_CARD_LOOPS
    7.12 @@ -43,11 +43,12 @@
    7.13    _cards[2] = NullEntry;
    7.14    _cards[3] = NullEntry;
    7.15  #else
    7.16 -  for (int i = 0; i < CardsPerEntry; i++) _cards[i] = NullEntry;
    7.17 +  for (int i = 0; i < CardsPerEntry; i++)
    7.18 +    _cards[i] = NullEntry;
    7.19  #endif
    7.20  }
    7.21  
    7.22 -bool SparsePRTEntry::contains_card(short card_index) const {
    7.23 +bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
    7.24  #if UNROLL_CARD_LOOPS
    7.25    assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
    7.26    if (_cards[0] == card_index) return true;
    7.27 @@ -80,10 +81,10 @@
    7.28    return sum;
    7.29  }
    7.30  
    7.31 -SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(short card_index) {
    7.32 +SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
    7.33  #if UNROLL_CARD_LOOPS
    7.34    assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
    7.35 -  short c = _cards[0];
    7.36 +  CardIdx_t c = _cards[0];
    7.37    if (c == card_index) return found;
    7.38    if (c == NullEntry) { _cards[0] = card_index; return added; }
    7.39    c = _cards[1];
    7.40 @@ -97,16 +98,19 @@
    7.41    if (c == NullEntry) { _cards[3] = card_index; return added; }
    7.42  #else
    7.43    for (int i = 0; i < CardsPerEntry; i++) {
    7.44 -    short c = _cards[i];
    7.45 +    CardIdx_t c = _cards[i];
    7.46      if (c == card_index) return found;
    7.47 -    if (c == NullEntry) { _cards[i] = card_index; return added; }
    7.48 +    if (c == NullEntry) {
    7.49 +      _cards[i] = card_index;
    7.50 +      return added;
    7.51 +    }
    7.52    }
    7.53  #endif
    7.54    // Otherwise, we're full.
    7.55    return overflow;
    7.56  }
    7.57  
    7.58 -void SparsePRTEntry::copy_cards(short* cards) const {
    7.59 +void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
    7.60  #if UNROLL_CARD_LOOPS
    7.61    assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
    7.62    cards[0] = _cards[0];
    7.63 @@ -130,7 +134,7 @@
    7.64    _capacity(capacity), _capacity_mask(capacity-1),
    7.65    _occupied_entries(0), _occupied_cards(0),
    7.66    _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
    7.67 -  _buckets(NEW_C_HEAP_ARRAY(short, capacity)),
    7.68 +  _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
    7.69    _next_deleted(NULL), _deleted(false),
    7.70    _free_list(NullEntry), _free_region(0)
    7.71  {
    7.72 @@ -143,7 +147,7 @@
    7.73      _entries = NULL;
    7.74    }
    7.75    if (_buckets != NULL) {
    7.76 -    FREE_C_HEAP_ARRAY(short, _buckets);
    7.77 +    FREE_C_HEAP_ARRAY(int, _buckets);
    7.78      _buckets = NULL;
    7.79    }
    7.80  }
    7.81 @@ -153,14 +157,18 @@
    7.82    _occupied_cards = 0;
    7.83    guarantee(_entries != NULL, "INV");
    7.84    guarantee(_buckets != NULL, "INV");
    7.85 +
    7.86 +  guarantee(_capacity <= ((size_t)1 << (sizeof(int)*BitsPerByte-1)) - 1,
    7.87 +                "_capacity too large");
    7.88 +
    7.89    // This will put -1 == NullEntry in the key field of all entries.
    7.90    memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
    7.91 -  memset(_buckets, -1, _capacity * sizeof(short));
    7.92 +  memset(_buckets, -1, _capacity * sizeof(int));
    7.93    _free_list = NullEntry;
    7.94    _free_region = 0;
    7.95  }
    7.96  
    7.97 -bool RSHashTable::add_card(short region_ind, short card_index) {
    7.98 +bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
    7.99    SparsePRTEntry* e = entry_for_region_ind_create(region_ind);
   7.100    assert(e != NULL && e->r_ind() == region_ind,
   7.101           "Postcondition of call above.");
   7.102 @@ -175,9 +183,9 @@
   7.103    return res != SparsePRTEntry::overflow;
   7.104  }
   7.105  
   7.106 -bool RSHashTable::get_cards(short region_ind, short* cards) {
   7.107 -  short ind = (short) (region_ind & capacity_mask());
   7.108 -  short cur_ind = _buckets[ind];
   7.109 +bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
   7.110 +  int ind = (int) (region_ind & capacity_mask());
   7.111 +  int cur_ind = _buckets[ind];
   7.112    SparsePRTEntry* cur;
   7.113    while (cur_ind != NullEntry &&
   7.114           (cur = entry(cur_ind))->r_ind() != region_ind) {
   7.115 @@ -192,10 +200,10 @@
   7.116    return true;
   7.117  }
   7.118  
   7.119 -bool RSHashTable::delete_entry(short region_ind) {
   7.120 -  short ind = (short) (region_ind & capacity_mask());
   7.121 -  short* prev_loc = &_buckets[ind];
   7.122 -  short cur_ind = *prev_loc;
   7.123 +bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
   7.124 +  int ind = (int) (region_ind & capacity_mask());
   7.125 +  int* prev_loc = &_buckets[ind];
   7.126 +  int cur_ind = *prev_loc;
   7.127    SparsePRTEntry* cur;
   7.128    while (cur_ind != NullEntry &&
   7.129           (cur = entry(cur_ind))->r_ind() != region_ind) {
   7.130 @@ -212,10 +220,11 @@
   7.131    return true;
   7.132  }
   7.133  
   7.134 -SparsePRTEntry* RSHashTable::entry_for_region_ind(short region_ind) const {
   7.135 +SparsePRTEntry*
   7.136 +RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
   7.137    assert(occupied_entries() < capacity(), "Precondition");
   7.138 -  short ind = (short) (region_ind & capacity_mask());
   7.139 -  short cur_ind = _buckets[ind];
   7.140 +  int ind = (int) (region_ind & capacity_mask());
   7.141 +  int cur_ind = _buckets[ind];
   7.142    SparsePRTEntry* cur;
   7.143    // XXX
   7.144    // int k = 0;
   7.145 @@ -242,15 +251,16 @@
   7.146    }
   7.147  }
   7.148  
   7.149 -SparsePRTEntry* RSHashTable::entry_for_region_ind_create(short region_ind) {
   7.150 +SparsePRTEntry*
   7.151 +RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
   7.152    SparsePRTEntry* res = entry_for_region_ind(region_ind);
   7.153    if (res == NULL) {
   7.154 -    short new_ind = alloc_entry();
   7.155 +    int new_ind = alloc_entry();
   7.156      assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room.");
   7.157      res = entry(new_ind);
   7.158      res->init(region_ind);
   7.159      // Insert at front.
   7.160 -    short ind = (short) (region_ind & capacity_mask());
   7.161 +    int ind = (int) (region_ind & capacity_mask());
   7.162      res->set_next_index(_buckets[ind]);
   7.163      _buckets[ind] = new_ind;
   7.164      _occupied_entries++;
   7.165 @@ -258,8 +268,8 @@
   7.166    return res;
   7.167  }
   7.168  
   7.169 -short RSHashTable::alloc_entry() {
   7.170 -  short res;
   7.171 +int RSHashTable::alloc_entry() {
   7.172 +  int res;
   7.173    if (_free_list != NullEntry) {
   7.174      res = _free_list;
   7.175      _free_list = entry(res)->next_index();
   7.176 @@ -273,13 +283,11 @@
   7.177    }
   7.178  }
   7.179  
   7.180 -
   7.181 -void RSHashTable::free_entry(short fi) {
   7.182 +void RSHashTable::free_entry(int fi) {
   7.183    entry(fi)->set_next_index(_free_list);
   7.184    _free_list = fi;
   7.185  }
   7.186  
   7.187 -
   7.188  void RSHashTable::add_entry(SparsePRTEntry* e) {
   7.189    assert(e->num_valid_cards() > 0, "Precondition.");
   7.190    SparsePRTEntry* e2 = entry_for_region_ind_create(e->r_ind());
   7.191 @@ -322,8 +330,8 @@
   7.192    return NULL;
   7.193  }
   7.194  
   7.195 -short /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
   7.196 -  short res;
   7.197 +CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
   7.198 +  CardIdx_t res;
   7.199    while (_bl_ind != RSHashTable::NullEntry) {
   7.200      res = _rsht->entry(_bl_ind)->card(0);
   7.201      if (res != SparsePRTEntry::NullEntry) {
   7.202 @@ -336,7 +344,7 @@
   7.203    return SparsePRTEntry::NullEntry;
   7.204  }
   7.205  
   7.206 -size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(short ci) {
   7.207 +size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   7.208    return
   7.209      _heap_bot_card_ind
   7.210      + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
   7.211 @@ -345,7 +353,7 @@
   7.212  
   7.213  bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   7.214    _card_ind++;
   7.215 -  short ci;
   7.216 +  CardIdx_t ci;
   7.217    if (_card_ind < SparsePRTEntry::CardsPerEntry &&
   7.218        ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
   7.219         SparsePRTEntry::NullEntry)) {
   7.220 @@ -379,16 +387,16 @@
   7.221    return false;
   7.222  }
   7.223  
   7.224 -bool RSHashTable::contains_card(short region_index, short card_index) const {
   7.225 +bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
   7.226    SparsePRTEntry* e = entry_for_region_ind(region_index);
   7.227    return (e != NULL && e->contains_card(card_index));
   7.228  }
   7.229  
   7.230  size_t RSHashTable::mem_size() const {
   7.231 -  return sizeof(this) + capacity() * (sizeof(SparsePRTEntry) + sizeof(short));
   7.232 +  return sizeof(this) +
   7.233 +    capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
   7.234  }
   7.235  
   7.236 -
   7.237  // ----------------------------------------------------------------------
   7.238  
   7.239  SparsePRT* SparsePRT::_head_expanded_list = NULL;
   7.240 @@ -408,6 +416,7 @@
   7.241    }
   7.242  }
   7.243  
   7.244 +
   7.245  SparsePRT* SparsePRT::get_from_expanded_list() {
   7.246    SparsePRT* hd = _head_expanded_list;
   7.247    while (hd != NULL) {
   7.248 @@ -452,6 +461,7 @@
   7.249    _next = _cur;
   7.250  }
   7.251  
   7.252 +
   7.253  SparsePRT::~SparsePRT() {
   7.254    assert(_next != NULL && _cur != NULL, "Inv");
   7.255    if (_cur != _next) { delete _cur; }
   7.256 @@ -465,7 +475,7 @@
   7.257    return sizeof(this) + _next->mem_size();
   7.258  }
   7.259  
   7.260 -bool SparsePRT::add_card(short region_id, short card_index) {
   7.261 +bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
   7.262  #if SPARSE_PRT_VERBOSE
   7.263    gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
   7.264                  card_index, region_id, _hr->hrs_index());
   7.265 @@ -476,11 +486,11 @@
   7.266    return _next->add_card(region_id, card_index);
   7.267  }
   7.268  
   7.269 -bool SparsePRT::get_cards(short region_id, short* cards) {
   7.270 +bool SparsePRT::get_cards(RegionIdx_t region_id, CardIdx_t* cards) {
   7.271    return _next->get_cards(region_id, cards);
   7.272  }
   7.273  
   7.274 -bool SparsePRT::delete_entry(short region_id) {
   7.275 +bool SparsePRT::delete_entry(RegionIdx_t region_id) {
   7.276    return _next->delete_entry(region_id);
   7.277  }
   7.278  
     8.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jun 25 12:09:48 2009 -0700
     8.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jun 25 22:01:08 2009 -0700
     8.3 @@ -35,32 +35,32 @@
     8.4  
     8.5  class SparsePRTEntry: public CHeapObj {
     8.6  public:
     8.7 +
     8.8    enum SomePublicConstants {
     8.9 -    CardsPerEntry = (short)4,
    8.10 -    NullEntry = (short)-1,
    8.11 -    DeletedEntry = (short)-2
    8.12 +    CardsPerEntry =  4,
    8.13 +    NullEntry     = -1
    8.14    };
    8.15  
    8.16  private:
    8.17 -  short _region_ind;
    8.18 -  short _next_index;
    8.19 -  short _cards[CardsPerEntry];
    8.20 +  RegionIdx_t _region_ind;
    8.21 +  int         _next_index;
    8.22 +  CardIdx_t   _cards[CardsPerEntry];
    8.23  
    8.24  public:
    8.25  
    8.26    // Set the region_ind to the given value, and delete all cards.
    8.27 -  inline void init(short region_ind);
    8.28 +  inline void init(RegionIdx_t region_ind);
    8.29  
    8.30 -  short r_ind() const { return _region_ind; }
    8.31 +  RegionIdx_t r_ind() const { return _region_ind; }
    8.32    bool valid_entry() const { return r_ind() >= 0; }
    8.33 -  void set_r_ind(short rind) { _region_ind = rind; }
    8.34 +  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }
    8.35  
    8.36 -  short next_index() const { return _next_index; }
    8.37 -  short* next_index_addr() { return &_next_index; }
    8.38 -  void set_next_index(short ni) { _next_index = ni; }
    8.39 +  int next_index() const { return _next_index; }
    8.40 +  int* next_index_addr() { return &_next_index; }
    8.41 +  void set_next_index(int ni) { _next_index = ni; }
    8.42  
    8.43    // Returns "true" iff the entry contains the given card index.
    8.44 -  inline bool contains_card(short card_index) const;
    8.45 +  inline bool contains_card(CardIdx_t card_index) const;
    8.46  
    8.47    // Returns the number of non-NULL card entries.
    8.48    inline int num_valid_cards() const;
    8.49 @@ -73,14 +73,14 @@
    8.50      found,
    8.51      added
    8.52    };
    8.53 -  inline AddCardResult add_card(short card_index);
    8.54 +  inline AddCardResult add_card(CardIdx_t card_index);
    8.55  
    8.56    // Copy the current entry's cards into "cards".
    8.57 -  inline void copy_cards(short* cards) const;
    8.58 +  inline void copy_cards(CardIdx_t* cards) const;
    8.59    // Copy the current entry's cards into the "_card" array of "e."
    8.60    inline void copy_cards(SparsePRTEntry* e) const;
    8.61  
    8.62 -  inline short card(int i) const { return _cards[i]; }
    8.63 +  inline CardIdx_t card(int i) const { return _cards[i]; }
    8.64  };
    8.65  
    8.66  
    8.67 @@ -98,9 +98,9 @@
    8.68    size_t _occupied_cards;
    8.69  
    8.70    SparsePRTEntry* _entries;
    8.71 -  short* _buckets;
    8.72 -  short  _free_region;
    8.73 -  short  _free_list;
    8.74 +  int* _buckets;
    8.75 +  int  _free_region;
    8.76 +  int  _free_list;
    8.77  
    8.78    static RSHashTable* _head_deleted_list;
    8.79    RSHashTable* _next_deleted;
    8.80 @@ -113,20 +113,20 @@
    8.81    // operations, and that the the table be less than completely full.  If
    8.82    // an entry for "region_ind" is already in the table, finds it and
    8.83    // returns its address; otherwise returns "NULL."
    8.84 -  SparsePRTEntry* entry_for_region_ind(short region_ind) const;
    8.85 +  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;
    8.86  
    8.87    // Requires that the caller hold a lock preventing parallel modifying
    8.88    // operations, and that the the table be less than completely full.  If
    8.89    // an entry for "region_ind" is already in the table, finds it and
    8.90    // returns its address; otherwise allocates, initializes, inserts and
    8.91    // returns a new entry for "region_ind".
    8.92 -  SparsePRTEntry* entry_for_region_ind_create(short region_ind);
    8.93 +  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);
    8.94  
    8.95    // Returns the index of the next free entry in "_entries".
    8.96 -  short alloc_entry();
    8.97 +  int alloc_entry();
    8.98    // Declares the entry "fi" to be free.  (It must have already been
    8.99    // deleted from any bucket lists.
   8.100 -  void free_entry(short fi);
   8.101 +  void free_entry(int fi);
   8.102  
   8.103  public:
   8.104    RSHashTable(size_t capacity);
   8.105 @@ -138,12 +138,12 @@
   8.106    // Otherwise, returns "false" to indicate that the addition would
   8.107    // overflow the entry for the region.  The caller must transfer these
   8.108    // entries to a larger-capacity representation.
   8.109 -  bool add_card(short region_id, short card_index);
   8.110 +  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
   8.111  
   8.112 -  bool get_cards(short region_id, short* cards);
   8.113 -  bool delete_entry(short region_id);
   8.114 +  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
   8.115 +  bool delete_entry(RegionIdx_t region_id);
   8.116  
   8.117 -  bool contains_card(short region_id, short card_index) const;
   8.118 +  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
   8.119  
   8.120    void add_entry(SparsePRTEntry* e);
   8.121  
   8.122 @@ -162,51 +162,49 @@
   8.123  
   8.124    static void add_to_deleted_list(RSHashTable* rsht);
   8.125    static RSHashTable* get_from_deleted_list();
   8.126 -
   8.127 -
   8.128  };
   8.129  
   8.130 -  // ValueObj because will be embedded in HRRS iterator.
   8.131 +// ValueObj because will be embedded in HRRS iterator.
   8.132  class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   8.133 -    short _tbl_ind;
   8.134 -    short _bl_ind;
   8.135 -    short _card_ind;
   8.136 -    RSHashTable* _rsht;
   8.137 -    size_t _heap_bot_card_ind;
   8.138 +  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
   8.139 +  int _bl_ind;          // [-1, 0.._rsht->_capacity)
   8.140 +  short _card_ind;      // [0..CardsPerEntry)
   8.141 +  RSHashTable* _rsht;
   8.142 +  size_t _heap_bot_card_ind;
   8.143  
   8.144 -    enum SomePrivateConstants {
   8.145 -      CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
   8.146 -    };
   8.147 +  enum SomePrivateConstants {
   8.148 +    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
   8.149 +  };
   8.150  
   8.151 -    // If the bucket list pointed to by _bl_ind contains a card, sets
   8.152 -    // _bl_ind to the index of that entry, and returns the card.
   8.153 -    // Otherwise, returns SparseEntry::NullEnty.
   8.154 -    short find_first_card_in_list();
   8.155 -    // Computes the proper card index for the card whose offset in the
   8.156 -    // current region (as indicated by _bl_ind) is "ci".
   8.157 -    // This is subject to errors when there is iteration concurrent with
   8.158 -    // modification, but these errors should be benign.
   8.159 -    size_t compute_card_ind(short ci);
   8.160 +  // If the bucket list pointed to by _bl_ind contains a card, sets
   8.161 +  // _bl_ind to the index of that entry, and returns the card.
   8.162 +  // Otherwise, returns SparseEntry::NullEntry.
   8.163 +  CardIdx_t find_first_card_in_list();
   8.164  
   8.165 -  public:
   8.166 -    RSHashTableIter(size_t heap_bot_card_ind) :
   8.167 -      _tbl_ind(RSHashTable::NullEntry),
   8.168 -      _bl_ind(RSHashTable::NullEntry),
   8.169 -      _card_ind((SparsePRTEntry::CardsPerEntry-1)),
   8.170 -      _rsht(NULL),
   8.171 -      _heap_bot_card_ind(heap_bot_card_ind)
   8.172 -    {}
   8.173 +  // Computes the proper card index for the card whose offset in the
   8.174 +  // current region (as indicated by _bl_ind) is "ci".
   8.175 +  // This is subject to errors when there is iteration concurrent with
   8.176 +  // modification, but these errors should be benign.
   8.177 +  size_t compute_card_ind(CardIdx_t ci);
   8.178  
   8.179 -    void init(RSHashTable* rsht) {
   8.180 -      _rsht = rsht;
   8.181 -      _tbl_ind = -1; // So that first increment gets to 0.
   8.182 -      _bl_ind = RSHashTable::NullEntry;
   8.183 -      _card_ind = (SparsePRTEntry::CardsPerEntry-1);
   8.184 -    }
   8.185 +public:
   8.186 +  RSHashTableIter(size_t heap_bot_card_ind) :
   8.187 +    _tbl_ind(RSHashTable::NullEntry),
   8.188 +    _bl_ind(RSHashTable::NullEntry),
   8.189 +    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
   8.190 +    _rsht(NULL),
   8.191 +    _heap_bot_card_ind(heap_bot_card_ind)
   8.192 +  {}
   8.193  
   8.194 -    bool has_next(size_t& card_index);
   8.195 +  void init(RSHashTable* rsht) {
   8.196 +    _rsht = rsht;
   8.197 +    _tbl_ind = -1; // So that first increment gets to 0.
   8.198 +    _bl_ind = RSHashTable::NullEntry;
   8.199 +    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
   8.200 +  }
   8.201  
   8.202 -  };
   8.203 +  bool has_next(size_t& card_index);
   8.204 +};
   8.205  
   8.206  // Concurrent accesss to a SparsePRT must be serialized by some external
   8.207  // mutex.
   8.208 @@ -238,7 +236,6 @@
   8.209    SparsePRT* next_expanded() { return _next_expanded; }
   8.210    void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
   8.211  
   8.212 -
   8.213    static SparsePRT* _head_expanded_list;
   8.214  
   8.215  public:
   8.216 @@ -255,16 +252,16 @@
   8.217    // Otherwise, returns "false" to indicate that the addition would
   8.218    // overflow the entry for the region.  The caller must transfer these
   8.219    // entries to a larger-capacity representation.
   8.220 -  bool add_card(short region_id, short card_index);
   8.221 +  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
   8.222  
   8.223    // If the table hold an entry for "region_ind",  Copies its
   8.224    // cards into "cards", which must be an array of length at least
   8.225    // "CardsPerEntry", and returns "true"; otherwise, returns "false".
   8.226 -  bool get_cards(short region_ind, short* cards);
   8.227 +  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);
   8.228  
   8.229    // If there is an entry for "region_ind", removes it and return "true";
   8.230    // otherwise returns "false."
   8.231 -  bool delete_entry(short region_ind);
   8.232 +  bool delete_entry(RegionIdx_t region_ind);
   8.233  
   8.234    // Clear the table, and reinitialize to initial capacity.
   8.235    void clear();
   8.236 @@ -276,13 +273,12 @@
   8.237    static void cleanup_all();
   8.238    RSHashTable* cur() const { return _cur; }
   8.239  
   8.240 -
   8.241    void init_iterator(SparsePRTIter* sprt_iter);
   8.242  
   8.243    static void add_to_expanded_list(SparsePRT* sprt);
   8.244    static SparsePRT* get_from_expanded_list();
   8.245  
   8.246 -  bool contains_card(short region_id, short card_index) const {
   8.247 +  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
   8.248      return _next->contains_card(region_id, card_index);
   8.249    }
   8.250  
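For orientation, here is a toy, heavily simplified model of the RSHashTable layout these declarations describe: an int bucket array indexed by region_ind & capacity_mask(), entries chained through int next-index fields, NullEntry (-1) terminating each chain, and new entries inserted at the front of their bucket. The free list, deletion, and per-entry card storage are omitted; ToyTable and Entry are hypothetical names:

    #include <cassert>
    #include <vector>

    struct Entry { int region_ind = -1; int next = -1; };

    struct ToyTable {
      enum { NullEntry = -1 };
      // The mask trick assumes a power-of-two capacity, which is why the
      // real constructor stores _capacity_mask as capacity - 1.
      size_t capacity;
      std::vector<int> buckets;
      std::vector<Entry> entries;
      int free_region = 0;
      ToyTable(size_t cap)
        : capacity(cap), buckets(cap, (int)NullEntry), entries(cap) {}
      Entry* find(int region_ind) {
        for (int i = buckets[region_ind & (capacity - 1)]; i != NullEntry;
             i = entries[i].next)
          if (entries[i].region_ind == region_ind) return &entries[i];
        return nullptr;
      }
      void insert(int region_ind) {
        assert(free_region < (int)capacity && !find(region_ind));
        int ni = free_region++;
        entries[ni].region_ind = region_ind;
        int b = (int)(region_ind & (capacity - 1));
        entries[ni].next = buckets[b];   // insert at front, as the real code does
        buckets[b] = ni;
      }
    };

    int main() {
      ToyTable t(8);
      t.insert(3); t.insert(11);         // 3 and 11 collide in bucket 3
      assert(t.find(3) && t.find(11) && !t.find(19));
      return 0;
    }
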
     9.1 --- a/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Jun 25 12:09:48 2009 -0700
     9.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Jun 25 22:01:08 2009 -0700
     9.3 @@ -51,7 +51,6 @@
     9.4  concurrentG1Refine.hpp			allocation.hpp
     9.5  concurrentG1Refine.hpp			thread.hpp
     9.6  
     9.7 -
     9.8  concurrentG1RefineThread.cpp		concurrentG1Refine.hpp
     9.9  concurrentG1RefineThread.cpp		concurrentG1RefineThread.hpp
    9.10  concurrentG1RefineThread.cpp		g1CollectedHeap.inline.hpp
    9.11 @@ -334,6 +333,7 @@
    9.12  sparsePRT.hpp				allocation.hpp
    9.13  sparsePRT.hpp				cardTableModRefBS.hpp
    9.14  sparsePRT.hpp				globalDefinitions.hpp
    9.15 +sparsePRT.hpp                           g1CollectedHeap.inline.hpp
    9.16  sparsePRT.hpp				heapRegion.hpp
    9.17  sparsePRT.hpp				mutex.hpp
    9.18  
    10.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Thu Jun 25 12:09:48 2009 -0700
    10.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Thu Jun 25 22:01:08 2009 -0700
    10.3 @@ -177,6 +177,7 @@
    10.4    // are double-word aligned in 32-bit VMs, but not in 64-bit VMs, so the 32-bit
    10.5    // granularity is 2, 64-bit is 1.
    10.6    static inline size_t obj_granularity() { return size_t(MinObjAlignment); }
    10.7 +  static inline int obj_granularity_shift() { return LogMinObjAlignment; }
    10.8  
    10.9    HeapWord*       _region_start;
   10.10    size_t          _region_size;
   10.11 @@ -299,13 +300,13 @@
   10.12  inline size_t
   10.13  ParMarkBitMap::bits_to_words(idx_t bits)
   10.14  {
   10.15 -  return bits * obj_granularity();
   10.16 +  return bits << obj_granularity_shift();
   10.17  }
   10.18  
   10.19  inline ParMarkBitMap::idx_t
   10.20  ParMarkBitMap::words_to_bits(size_t words)
   10.21  {
   10.22 -  return words / obj_granularity();
   10.23 +  return words >> obj_granularity_shift();
   10.24  }
   10.25  
   10.26  inline size_t ParMarkBitMap::obj_size(idx_t beg_bit, idx_t end_bit) const
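
The bits_to_words/words_to_bits change above swaps a multiply and an integer divide for shifts by the new obj_granularity_shift(). The two forms agree exactly because the granularity is a power of two (2 on 32-bit VMs, 1 on 64-bit, per the comment in the header). A quick standalone check:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t granularity = 2;  // 32-bit VM case from the comment above
      const int    shift       = 1;  // log2(granularity)
      for (size_t n = 0; n < 10000; ++n) {
        assert(n * granularity == (n << shift));  // bits_to_words
        assert(n / granularity == (n >> shift));  // words_to_bits
      }
      return 0;
    }
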
    11.1 --- a/src/share/vm/includeDB_compiler1	Thu Jun 25 12:09:48 2009 -0700
    11.2 +++ b/src/share/vm/includeDB_compiler1	Thu Jun 25 22:01:08 2009 -0700
    11.3 @@ -387,7 +387,7 @@
    11.4  c1_ValueSet.cpp                         c1_ValueSet.hpp
    11.5  
    11.6  c1_ValueSet.hpp                         allocation.hpp
    11.7 -c1_ValueSet.hpp                         bitMap.hpp
    11.8 +c1_ValueSet.hpp                         bitMap.inline.hpp
    11.9  c1_ValueSet.hpp                         c1_Instruction.hpp
   11.10  
   11.11  c1_ValueStack.cpp                       c1_IR.hpp
    12.1 --- a/src/share/vm/memory/gcLocker.hpp	Thu Jun 25 12:09:48 2009 -0700
    12.2 +++ b/src/share/vm/memory/gcLocker.hpp	Thu Jun 25 22:01:08 2009 -0700
    12.3 @@ -242,6 +242,31 @@
    12.4  #endif
    12.5  };
    12.6  
    12.7 +// A SkipGCALot object is used to elide the usual effect of gc-a-lot
    12.8 +// over a section of execution by a thread. Currently, it's used only to
    12.9 +// prevent re-entrant calls to GC.
   12.10 +class SkipGCALot : public StackObj {
   12.11 +  private:
   12.12 +   bool _saved;
   12.13 +   Thread* _t;
   12.14 +
   12.15 +  public:
   12.16 +#ifdef ASSERT
   12.17 +    SkipGCALot(Thread* t) : _t(t) {
   12.18 +      _saved = _t->skip_gcalot();
   12.19 +      _t->set_skip_gcalot(true);
   12.20 +    }
   12.21 +
   12.22 +    ~SkipGCALot() {
   12.23 +      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
   12.24 +      _t->set_skip_gcalot(_saved);
   12.25 +    }
   12.26 +#else
   12.27 +    SkipGCALot(Thread* t) { }
   12.28 +    ~SkipGCALot() { }
   12.29 +#endif
   12.30 +};
   12.31 +
   12.32  // JRT_LEAF currently can be called from either _thread_in_Java or
   12.33  // _thread_in_native mode. In _thread_in_native, it is ok
   12.34  // for another thread to trigger GC. The rest of the JRT_LEAF
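
Because the constructor saves the old flag value and the destructor restores it, nested SkipGCALot scopes compose correctly. A standalone model with a simplified thread type (ThreadModel and SkipGCALotModel are hypothetical names; the real flag is the _skip_gcalot field added to Thread later in this changeset):

    #include <cassert>

    struct ThreadModel { bool skip_gcalot = false; };

    class SkipGCALotModel {
      bool _saved;
      ThreadModel* _t;
    public:
      SkipGCALotModel(ThreadModel* t) : _saved(t->skip_gcalot), _t(t) {
        _t->skip_gcalot = true;
      }
      ~SkipGCALotModel() {
        assert(_t->skip_gcalot);   // save-restore protocol invariant
        _t->skip_gcalot = _saved;
      }
    };

    int main() {
      ThreadModel t;
      {
        SkipGCALotModel outer(&t);
        { SkipGCALotModel inner(&t); }  // saves true, restores true
        assert(t.skip_gcalot);
      }
      assert(!t.skip_gcalot);           // outer restored the original false
      return 0;
    }
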
    13.1 --- a/src/share/vm/runtime/interfaceSupport.cpp	Thu Jun 25 12:09:48 2009 -0700
    13.2 +++ b/src/share/vm/runtime/interfaceSupport.cpp	Thu Jun 25 22:01:08 2009 -0700
    13.3 @@ -66,11 +66,14 @@
    13.4  
    13.5  void InterfaceSupport::gc_alot() {
    13.6    Thread *thread = Thread::current();
    13.7 -  if (thread->is_VM_thread()) return; // Avoid concurrent calls
    13.8 +  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
    13.9    // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
   13.10    JavaThread *current_thread = (JavaThread *)thread;
   13.11    if (current_thread->active_handles() == NULL) return;
   13.12  
   13.13 +  // Short-circuit any possible re-entrant gc-a-lot attempt
   13.14 +  if (thread->skip_gcalot()) return;
   13.15 +
   13.16    if (is_init_completed()) {
   13.17  
   13.18      if (++_fullgc_alot_invocation < FullGCALotStart) {
    14.1 --- a/src/share/vm/runtime/thread.cpp	Thu Jun 25 12:09:48 2009 -0700
    14.2 +++ b/src/share/vm/runtime/thread.cpp	Thu Jun 25 22:01:08 2009 -0700
    14.3 @@ -127,6 +127,7 @@
    14.4    debug_only(_owned_locks = NULL;)
    14.5    debug_only(_allow_allocation_count = 0;)
    14.6    NOT_PRODUCT(_allow_safepoint_count = 0;)
    14.7 +  NOT_PRODUCT(_skip_gcalot = false;)
    14.8    CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
    14.9    _jvmti_env_iteration_count = 0;
   14.10    _vm_operation_started_count = 0;
   14.11 @@ -784,7 +785,6 @@
   14.12        // We could enter a safepoint here and thus have a gc
   14.13        InterfaceSupport::check_gc_alot();
   14.14      }
   14.15 -
   14.16  #endif
   14.17  }
   14.18  #endif
    15.1 --- a/src/share/vm/runtime/thread.hpp	Thu Jun 25 12:09:48 2009 -0700
    15.2 +++ b/src/share/vm/runtime/thread.hpp	Thu Jun 25 22:01:08 2009 -0700
    15.3 @@ -191,6 +191,9 @@
    15.4    NOT_PRODUCT(int _allow_safepoint_count;)       // If 0, thread allow a safepoint to happen
    15.5    debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
    15.6  
    15.7 +  // Used by SkipGCALot class.
    15.8 +  NOT_PRODUCT(bool _skip_gcalot;)                // Should we elide gc-a-lot?
    15.9 +
   15.10    // Record when GC is locked out via the GC_locker mechanism
   15.11    CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
   15.12  
   15.13 @@ -308,6 +311,11 @@
   15.14    bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
   15.15  #endif // CHECK_UNHANDLED_OOPS
   15.16  
   15.17 +#ifndef PRODUCT
   15.18 +  bool skip_gcalot()           { return _skip_gcalot; }
   15.19 +  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
   15.20 +#endif
   15.21 +
   15.22   public:
   15.23    // Installs a pending exception to be inserted later
   15.24    static void send_async_exception(oop thread_oop, oop java_throwable);
    16.1 --- a/src/share/vm/runtime/vmThread.cpp	Thu Jun 25 12:09:48 2009 -0700
    16.2 +++ b/src/share/vm/runtime/vmThread.cpp	Thu Jun 25 22:01:08 2009 -0700
    16.3 @@ -531,6 +531,7 @@
    16.4    Thread* t = Thread::current();
    16.5  
    16.6    if (!t->is_VM_thread()) {
    16.7 +    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    16.8      // JavaThread or WatcherThread
    16.9      t->check_for_valid_safepoint_state(true);
   16.10  
    17.1 --- a/src/share/vm/utilities/bitMap.cpp	Thu Jun 25 12:09:48 2009 -0700
    17.2 +++ b/src/share/vm/utilities/bitMap.cpp	Thu Jun 25 22:01:08 2009 -0700
    17.3 @@ -41,19 +41,6 @@
    17.4    resize(size_in_bits, in_resource_area);
    17.5  }
    17.6  
    17.7 -
    17.8 -void BitMap::verify_index(idx_t index) const {
    17.9 -    assert(index < _size, "BitMap index out of bounds");
   17.10 -}
   17.11 -
   17.12 -void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
   17.13 -#ifdef ASSERT
   17.14 -    assert(beg_index <= end_index, "BitMap range error");
   17.15 -    // Note that [0,0) and [size,size) are both valid ranges.
   17.16 -    if (end_index != _size)  verify_index(end_index);
   17.17 -#endif
   17.18 -}
   17.19 -
   17.20  void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
   17.21    assert(size_in_bits >= 0, "just checking");
   17.22    idx_t old_size_in_words = size_in_words();
    18.1 --- a/src/share/vm/utilities/bitMap.hpp	Thu Jun 25 12:09:48 2009 -0700
    18.2 +++ b/src/share/vm/utilities/bitMap.hpp	Thu Jun 25 22:01:08 2009 -0700
    18.3 @@ -93,10 +93,12 @@
    18.4    // The index of the first full word in a range.
    18.5    idx_t word_index_round_up(idx_t bit) const;
    18.6  
    18.7 -  // Verification, statistics.
    18.8 -  void verify_index(idx_t index) const;
    18.9 -  void verify_range(idx_t beg_index, idx_t end_index) const;
   18.10 +  // Verification.
   18.11 +  inline void verify_index(idx_t index) const NOT_DEBUG_RETURN;
   18.12 +  inline void verify_range(idx_t beg_index, idx_t end_index) const
   18.13 +    NOT_DEBUG_RETURN;
   18.14  
   18.15 +  // Statistics.
   18.16    static idx_t* _pop_count_table;
   18.17    static void init_pop_count_table();
   18.18    static idx_t num_set_bits(bm_word_t w);
   18.19 @@ -287,7 +289,6 @@
   18.20  #endif
   18.21  };
   18.22  
   18.23 -
   18.24  // Convenience class wrapping BitMap which provides multiple bits per slot.
   18.25  class BitMap2D VALUE_OBJ_CLASS_SPEC {
   18.26   public:
    19.1 --- a/src/share/vm/utilities/bitMap.inline.hpp	Thu Jun 25 12:09:48 2009 -0700
    19.2 +++ b/src/share/vm/utilities/bitMap.inline.hpp	Thu Jun 25 22:01:08 2009 -0700
    19.3 @@ -22,6 +22,17 @@
    19.4   *
    19.5   */
    19.6  
    19.7 +#ifdef ASSERT
    19.8 +inline void BitMap::verify_index(idx_t index) const {
    19.9 +  assert(index < _size, "BitMap index out of bounds");
   19.10 +}
   19.11 +
   19.12 +inline void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
   19.13 +  assert(beg_index <= end_index, "BitMap range error");
   19.14 +  // Note that [0,0) and [size,size) are both valid ranges.
   19.15 +  if (end_index != _size) verify_index(end_index);
   19.16 +}
   19.17 +#endif // #ifdef ASSERT
   19.18  
   19.19  inline void BitMap::set_bit(idx_t bit) {
   19.20    verify_index(bit);
    20.1 --- a/src/share/vm/utilities/macros.hpp	Thu Jun 25 12:09:48 2009 -0700
    20.2 +++ b/src/share/vm/utilities/macros.hpp	Thu Jun 25 22:01:08 2009 -0700
    20.3 @@ -106,11 +106,13 @@
    20.4  #ifdef ASSERT
    20.5  #define DEBUG_ONLY(code) code
    20.6  #define NOT_DEBUG(code)
    20.7 +#define NOT_DEBUG_RETURN  /*next token must be ;*/
    20.8  // Historical.
    20.9  #define debug_only(code) code
   20.10  #else // ASSERT
   20.11  #define DEBUG_ONLY(code)
   20.12  #define NOT_DEBUG(code) code
   20.13 +#define NOT_DEBUG_RETURN {}
   20.14  #define debug_only(code)
   20.15  #endif // ASSERT
   20.16  
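NOT_DEBUG_RETURN lets a single line serve as a declaration in debug builds (where the real body lives elsewhere, as in the bitMap.inline.hpp change above) and as an empty inline definition in product builds, so calls compile away. A minimal standalone sketch of the idiom (BitMapModel is a hypothetical stand-in):

    #include <cassert>
    #include <cstddef>

    #ifdef ASSERT
    #define NOT_DEBUG_RETURN  /* next token must be ; */
    #else
    #define NOT_DEBUG_RETURN {}
    #endif

    struct BitMapModel {
      size_t _size;
      // Debug build: declaration only, body supplied below.
      // Product build: expands to an empty inline body.
      void verify_index(size_t index) const NOT_DEBUG_RETURN;
    };

    #ifdef ASSERT
    inline void BitMapModel::verify_index(size_t index) const {
      assert(index < _size && "BitMap index out of bounds");
    }
    #endif

    int main() {
      BitMapModel bm = { 64 };
      bm.verify_index(3);  // checked under -DASSERT, a no-op otherwise
      return 0;
    }
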
