Merge

Fri, 19 Nov 2010 13:19:49 -0800

author
jmasa
date
Fri, 19 Nov 2010 13:19:49 -0800
changeset 2304
0be53e62c06c
parent 2300
13fee5052895
parent 2303
deef066c3622
child 2308
4110c3e0c50d

Merge

     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Nov 17 09:21:51 2010 -0500
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Nov 19 13:19:49 2010 -0800
     1.3 @@ -1093,8 +1093,9 @@
     1.4  // perm_gen_verify_bit_map where we store the "deadness" information if
     1.5  // we did not sweep the perm gen in the most recent previous GC cycle.
     1.6  bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
     1.7 +  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
     1.8 +         "Else races are possible");
     1.9    assert(block_is_obj(p), "The address should point to an object");
    1.10 -  assert(SafepointSynchronize::is_at_safepoint(), "Else races are possible");
    1.11  
    1.12    // If we're sweeping, we use object liveness information from the main bit map
    1.13    // for both perm gen and old gen.
     2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Nov 17 09:21:51 2010 -0500
     2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Nov 19 13:19:49 2010 -0800
     2.3 @@ -795,6 +795,7 @@
     2.4      _worker_i(worker_i),
     2.5      _g1h(g1)
     2.6    { }
     2.7 +
     2.8    bool doHeapRegion(HeapRegion* r) {
     2.9      if (!r->continuesHumongous()) {
    2.10        _cl.set_from(r);
     3.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Nov 17 09:21:51 2010 -0500
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Nov 19 13:19:49 2010 -0800
     3.3 @@ -116,7 +116,6 @@
     3.4    : _g1(g1), _conc_refine_cards(0),
     3.5      _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     3.6      _cg1r(g1->concurrent_g1_refine()),
     3.7 -    _traversal_in_progress(false),
     3.8      _cset_rs_update_cl(NULL),
     3.9      _cards_scanned(NULL), _total_cards_scanned(0)
    3.10  {
    3.11 @@ -512,8 +511,6 @@
    3.12    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    3.13    dcqs.concatenate_logs();
    3.14  
    3.15 -  assert(!_traversal_in_progress, "Invariant between iterations.");
    3.16 -  set_traversal(true);
    3.17    if (ParallelGCThreads > 0) {
    3.18      _seq_task->set_n_threads((int)n_workers());
    3.19    }
    3.20 @@ -539,9 +536,6 @@
    3.21  // through the oops which coincide with that card. It scans the reference
    3.22  // fields in each oop; when it finds an oop that points into the collection
    3.23  // set, the RSet for the region containing the referenced object is updated.
    3.24 -// Note: _par_traversal_in_progress in the G1RemSet must be FALSE; otherwise
    3.25 -// the UpdateRSetImmediate closure will cause cards to be enqueued on to
    3.26 -// the DCQS that we're iterating over, causing an infinite loop.
    3.27  class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
    3.28    G1CollectedHeap* _g1;
    3.29    CardTableModRefBS* _ct_bs;
    3.30 @@ -611,8 +605,6 @@
    3.31    // Set all cards back to clean.
    3.32    _g1->cleanUpCardTable();
    3.33  
    3.34 -  set_traversal(false);
    3.35 -
    3.36    DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
    3.37    int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
    3.38  
    3.39 @@ -645,21 +637,8 @@
    3.40    assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
    3.41           "all buffers should be freed");
    3.42    _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
    3.43 -
    3.44 -  assert(!_traversal_in_progress, "Invariant between iterations.");
    3.45  }
    3.46  
    3.47 -class UpdateRSObjectClosure: public ObjectClosure {
    3.48 -  UpdateRSOopClosure* _update_rs_oop_cl;
    3.49 -public:
    3.50 -  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    3.51 -    _update_rs_oop_cl(update_rs_oop_cl) {}
    3.52 -  void do_object(oop obj) {
    3.53 -    obj->oop_iterate(_update_rs_oop_cl);
    3.54 -  }
    3.55 -
    3.56 -};
    3.57 -
    3.58  class ScrubRSClosure: public HeapRegionClosure {
    3.59    G1CollectedHeap* _g1h;
    3.60    BitMap* _region_bm;
    3.61 @@ -749,7 +728,12 @@
    3.62    ct_freq_note_card(_ct_bs->index_for(start));
    3.63  #endif
    3.64  
    3.65 -  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
    3.66 +  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
    3.67 +  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
    3.68 +                                               _g1->g1_rem_set(),
    3.69 +                                               _cset_rs_update_cl[worker_i],
    3.70 +                                               check_for_refs_into_cset,
    3.71 +                                               worker_i);
    3.72    update_rs_oop_cl.set_from(r);
    3.73  
    3.74    TriggerClosure trigger_cl;
     4.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Nov 17 09:21:51 2010 -0500
     4.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Nov 19 13:19:49 2010 -0800
     4.3 @@ -59,11 +59,6 @@
     4.4    size_t*             _cards_scanned;
     4.5    size_t              _total_cards_scanned;
     4.6  
     4.7 -  // _traversal_in_progress is "true" iff a traversal is in progress.
     4.8 -
     4.9 -  bool _traversal_in_progress;
    4.10 -  void set_traversal(bool b) { _traversal_in_progress = b; }
    4.11 -
    4.12    // Used for caching the closure that is responsible for scanning
    4.13    // references into the collection set.
    4.14    OopsInHeapRegionClosure** _cset_rs_update_cl;
    4.15 @@ -76,10 +71,6 @@
    4.16    bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
    4.17                                      bool check_for_refs_into_cset);
    4.18  
    4.19 -protected:
    4.20 -  template <class T> void write_ref_nv(HeapRegion* from, T* p);
    4.21 -  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);
    4.22 -
    4.23  public:
    4.24    // This is called to reset dual hash tables after the gc pause
    4.25    // is finished and the initial hash table is no longer being
    4.26 @@ -117,22 +108,8 @@
    4.27  
    4.28    // Record, if necessary, the fact that *p (where "p" is in region "from",
    4.29    // which is required to be non-NULL) has changed to a new non-NULL value.
    4.30 -  // [Below the virtual version calls a non-virtual protected
    4.31 -  // workhorse that is templatified for narrow vs wide oop.]
    4.32 -  inline void write_ref(HeapRegion* from, oop* p) {
    4.33 -    write_ref_nv(from, p);
    4.34 -  }
    4.35 -  inline void write_ref(HeapRegion* from, narrowOop* p) {
    4.36 -    write_ref_nv(from, p);
    4.37 -  }
    4.38 -  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    4.39 -    par_write_ref_nv(from, p, tid);
    4.40 -  }
    4.41 -  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    4.42 -    par_write_ref_nv(from, p, tid);
    4.43 -  }
    4.44 -
    4.45 -  bool self_forwarded(oop obj);
    4.46 +  template <class T> void write_ref(HeapRegion* from, T* p);
    4.47 +  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
    4.48  
    4.49    // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
    4.50    // or card, respectively, such that a region or card with a corresponding
    4.51 @@ -186,9 +163,8 @@
    4.52  
    4.53  public:
    4.54    UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
    4.55 -    _from(NULL), _rs(rs), _worker_i(worker_i) {
    4.56 -    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
    4.57 -  }
    4.58 +    _from(NULL), _rs(rs), _worker_i(worker_i)
    4.59 +  {}
    4.60  
    4.61    void set_from(HeapRegion* from) {
    4.62      assert(from != NULL, "from region must be non-NULL");
    4.63 @@ -215,3 +191,43 @@
    4.64    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    4.65    virtual void do_oop(      oop* p) { do_oop_work(p); }
    4.66  };
    4.67 +
    4.68 +class UpdateRSOrPushRefOopClosure: public OopClosure {
    4.69 +  G1CollectedHeap* _g1;
    4.70 +  G1RemSet* _g1_rem_set;
    4.71 +  HeapRegion* _from;
    4.72 +  OopsInHeapRegionClosure* _push_ref_cl;
    4.73 +  bool _record_refs_into_cset;
    4.74 +  int _worker_i;
    4.75 +
    4.76 +  template <class T> void do_oop_work(T* p);
    4.77 +
    4.78 +public:
    4.79 +  UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
    4.80 +                              G1RemSet* rs,
    4.81 +                              OopsInHeapRegionClosure* push_ref_cl,
    4.82 +                              bool record_refs_into_cset,
    4.83 +                              int worker_i = 0) :
    4.84 +    _g1(g1h),
    4.85 +    _g1_rem_set(rs),
    4.86 +    _from(NULL),
    4.87 +    _record_refs_into_cset(record_refs_into_cset),
    4.88 +    _push_ref_cl(push_ref_cl),
    4.89 +    _worker_i(worker_i) { }
    4.90 +
    4.91 +  void set_from(HeapRegion* from) {
    4.92 +    assert(from != NULL, "from region must be non-NULL");
    4.93 +    _from = from;
    4.94 +  }
    4.95 +
    4.96 +  bool self_forwarded(oop obj) {
    4.97 +    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
    4.98 +    return result;
    4.99 +  }
   4.100 +
   4.101 +  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   4.102 +  virtual void do_oop(oop* p)       { do_oop_work(p); }
   4.103 +
   4.104 +  bool apply_to_weak_ref_discovered_field() { return true; }
   4.105 +};
   4.106 +
     5.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Nov 17 09:21:51 2010 -0500
     5.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Nov 19 13:19:49 2010 -0800
     5.3 @@ -31,17 +31,12 @@
     5.4  }
     5.5  
     5.6  template <class T>
     5.7 -inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
     5.8 -  par_write_ref_nv(from, p, 0);
     5.9 -}
    5.10 -
    5.11 -inline bool G1RemSet::self_forwarded(oop obj) {
    5.12 -  bool result =  (obj->is_forwarded() && (obj->forwardee()== obj));
    5.13 -  return result;
    5.14 +inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
    5.15 +  par_write_ref(from, p, 0);
    5.16  }
    5.17  
    5.18  template <class T>
    5.19 -inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
    5.20 +inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
    5.21    oop obj = oopDesc::load_decode_heap_oop(p);
    5.22  #ifdef ASSERT
    5.23    // can't do because of races
    5.24 @@ -62,34 +57,15 @@
    5.25    assert(from == NULL || from->is_in_reserved(p), "p is not in from");
    5.26  
    5.27    HeapRegion* to = _g1->heap_region_containing(obj);
    5.28 -  // The test below could be optimized by applying a bit op to to and from.
    5.29 -  if (to != NULL && from != NULL && from != to) {
    5.30 -    // The _traversal_in_progress flag is true during the collection pause,
    5.31 -    // false during the evacuation failure handling. This should avoid a
    5.32 -    // potential loop if we were to add the card containing 'p' to the DCQS
    5.33 -    // that's used to regenerate the remembered sets for the collection set,
    5.34 -    // in the event of an evacuation failure, here. The UpdateRSImmediate
     5.35 -    // closure will eventually call this routine.
    5.36 -    if (_traversal_in_progress &&
    5.37 -        to->in_collection_set() && !self_forwarded(obj)) {
    5.38 -
    5.39 -      assert(_cset_rs_update_cl[tid] != NULL, "should have been set already");
    5.40 -      _cset_rs_update_cl[tid]->do_oop(p);
    5.41 -
    5.42 -      // Deferred updates to the CSet are either discarded (in the normal case),
    5.43 -      // or processed (if an evacuation failure occurs) at the end
    5.44 -      // of the collection.
    5.45 -      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
    5.46 -    } else {
    5.47 +  if (to != NULL && from != to) {
    5.48  #if G1_REM_SET_LOGGING
    5.49 -      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
    5.50 -                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
    5.51 -                             p, obj,
    5.52 -                             to->bottom(), to->end());
    5.53 +    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
    5.54 +                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
    5.55 +                           p, obj,
    5.56 +                           to->bottom(), to->end());
    5.57  #endif
    5.58 -      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    5.59 -      to->rem_set()->add_reference(p, tid);
    5.60 -    }
    5.61 +    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    5.62 +    to->rem_set()->add_reference(p, tid);
    5.63    }
    5.64  }
    5.65  
    5.66 @@ -108,3 +84,64 @@
    5.67    }
    5.68  }
    5.69  
    5.70 +template <class T>
    5.71 +inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
    5.72 +  oop obj = oopDesc::load_decode_heap_oop(p);
    5.73 +#ifdef ASSERT
    5.74 +  // can't do because of races
    5.75 +  // assert(obj == NULL || obj->is_oop(), "expected an oop");
    5.76 +
    5.77 +  // Do the safe subset of is_oop
    5.78 +  if (obj != NULL) {
    5.79 +#ifdef CHECK_UNHANDLED_OOPS
    5.80 +    oopDesc* o = obj.obj();
    5.81 +#else
    5.82 +    oopDesc* o = obj;
    5.83 +#endif // CHECK_UNHANDLED_OOPS
    5.84 +    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    5.85 +    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
    5.86 +  }
    5.87 +#endif // ASSERT
    5.88 +
    5.89 +  assert(_from != NULL, "from region must be non-NULL");
    5.90 +
    5.91 +  HeapRegion* to = _g1->heap_region_containing(obj);
    5.92 +  if (to != NULL && _from != to) {
    5.93 +    // The _record_refs_into_cset flag is true during the RSet
    5.94 +    // updating part of an evacuation pause. It is false at all
    5.95 +    // other times:
     5.96 +    //  * rebuilding the remembered sets after a full GC
    5.97 +    //  * during concurrent refinement.
    5.98 +    //  * updating the remembered sets of regions in the collection
    5.99 +    //    set in the event of an evacuation failure (when deferred
   5.100 +    //    updates are enabled).
   5.101 +
   5.102 +    if (_record_refs_into_cset && to->in_collection_set()) {
   5.103 +      // We are recording references that point into the collection
   5.104 +      // set and this particular reference does exactly that...
   5.105 +      // If the referenced object has already been forwarded
   5.106 +      // to itself, we are handling an evacuation failure and
   5.107 +      // we have already visited/tried to copy this object
   5.108 +      // there is no need to retry.
   5.109 +      if (!self_forwarded(obj)) {
   5.110 +        assert(_push_ref_cl != NULL, "should not be null");
   5.111 +        // Push the reference in the refs queue of the G1ParScanThreadState
   5.112 +        // instance for this worker thread.
   5.113 +        _push_ref_cl->do_oop(p);
   5.114 +      }
   5.115 +
   5.116 +      // Deferred updates to the CSet are either discarded (in the normal case),
   5.117 +      // or processed (if an evacuation failure occurs) at the end
   5.118 +      // of the collection.
   5.119 +      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
   5.120 +    } else {
   5.121 +      // We either don't care about pushing references that point into the
   5.122 +      // collection set (i.e. we're not during an evacuation pause) _or_
   5.123 +      // the reference doesn't point into the collection set. Either way
   5.124 +      // we add the reference directly to the RSet of the region containing
   5.125 +      // the referenced object.
   5.126 +      _g1_rem_set->par_write_ref(_from, p, _worker_i);
   5.127 +    }
   5.128 +  }
   5.129 +}
   5.130 +
     6.1 --- a/src/share/vm/includeDB_core	Wed Nov 17 09:21:51 2010 -0500
     6.2 +++ b/src/share/vm/includeDB_core	Fri Nov 19 13:19:49 2010 -0800
     6.3 @@ -4454,6 +4454,7 @@
     6.4  universe.cpp                            generation.hpp
     6.5  universe.cpp                            handles.inline.hpp
     6.6  universe.cpp                            hashtable.inline.hpp
     6.7 +universe.cpp                            init.hpp
     6.8  universe.cpp                            instanceKlass.hpp
     6.9  universe.cpp                            instanceKlassKlass.hpp
    6.10  universe.cpp                            instanceRefKlass.hpp
     7.1 --- a/src/share/vm/memory/universe.cpp	Wed Nov 17 09:21:51 2010 -0500
     7.2 +++ b/src/share/vm/memory/universe.cpp	Fri Nov 19 13:19:49 2010 -0800
     7.3 @@ -945,6 +945,7 @@
     7.4  extern void initialize_converter_functions();
     7.5  
     7.6  bool universe_post_init() {
     7.7 +  assert(!is_init_completed(), "Error: initialization not yet completed!");
     7.8    Universe::_fully_initialized = true;
     7.9    EXCEPTION_MARK;
    7.10    { ResourceMark rm;
     8.1 --- a/src/share/vm/runtime/init.cpp	Wed Nov 17 09:21:51 2010 -0500
     8.2 +++ b/src/share/vm/runtime/init.cpp	Fri Nov 19 13:19:49 2010 -0800
     8.3 @@ -160,5 +160,6 @@
     8.4  
     8.5  
     8.6  void set_init_completed() {
     8.7 +  assert(Universe::is_fully_initialized(), "Should have completed initialization");
     8.8    _init_completed = true;
     8.9  }

mercurial