8049421: G1 Class Unloading after completing a concurrent mark cycle

author:    stefank
date:      Mon, 07 Jul 2014 10:12:40 +0200
changeset: 6992:2c6ef90f030a
parent:    6991:882004b9e7e1
child:     6993:870c03421152

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

src/share/vm/c1/c1_Runtime1.cpp
src/share/vm/ci/ciEnv.hpp
src/share/vm/ci/ciKlass.hpp
src/share/vm/ci/ciMethodData.cpp
src/share/vm/ci/ciMethodData.hpp
src/share/vm/ci/ciObjectFactory.cpp
src/share/vm/ci/ciObjectFactory.hpp
src/share/vm/classfile/classLoaderData.cpp
src/share/vm/classfile/classLoaderData.hpp
src/share/vm/classfile/dictionary.cpp
src/share/vm/classfile/dictionary.hpp
src/share/vm/classfile/metadataOnStackMark.cpp
src/share/vm/classfile/symbolTable.cpp
src/share/vm/classfile/systemDictionary.cpp
src/share/vm/classfile/systemDictionary.hpp
src/share/vm/code/codeCache.cpp
src/share/vm/code/codeCache.hpp
src/share/vm/code/compiledIC.cpp
src/share/vm/code/nmethod.cpp
src/share/vm/code/nmethod.hpp
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
src/share/vm/gc_implementation/g1/concurrentMark.cpp
src/share/vm/gc_implementation/g1/concurrentMark.hpp
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp
src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp
src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
src/share/vm/gc_implementation/g1/g1EvacFailure.hpp
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
src/share/vm/gc_implementation/g1/g1OopClosures.hpp
src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp
src/share/vm/gc_implementation/g1/heapRegion.cpp
src/share/vm/gc_implementation/g1/heapRegion.hpp
src/share/vm/gc_implementation/g1/heapRegion.inline.hpp
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
src/share/vm/gc_implementation/g1/satbQueue.cpp
src/share/vm/gc_implementation/g1/satbQueue.hpp
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
src/share/vm/gc_implementation/shared/markSweep.cpp
src/share/vm/gc_implementation/shared/markSweep.hpp
src/share/vm/gc_implementation/shared/vmGCOperations.cpp
src/share/vm/gc_implementation/shared/vmGCOperations.hpp
src/share/vm/memory/cardTableModRefBS.cpp
src/share/vm/memory/defNewGeneration.cpp
src/share/vm/memory/genCollectedHeap.cpp
src/share/vm/memory/genCollectedHeap.hpp
src/share/vm/memory/genMarkSweep.cpp
src/share/vm/memory/iterator.cpp
src/share/vm/memory/iterator.hpp
src/share/vm/memory/metadataFactory.hpp
src/share/vm/memory/sharedHeap.cpp
src/share/vm/memory/sharedHeap.hpp
src/share/vm/oops/instanceKlass.cpp
src/share/vm/oops/instanceKlass.hpp
src/share/vm/oops/klass.cpp
src/share/vm/oops/klass.hpp
src/share/vm/prims/jvmtiTagMap.cpp
src/share/vm/prims/whitebox.cpp
src/share/vm/prims/whitebox.hpp
src/share/vm/runtime/thread.cpp
src/share/vm/runtime/thread.hpp
src/share/vm/utilities/array.hpp
test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
     1.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Tue Jul 01 09:03:55 2014 +0200
     1.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Jul 07 10:12:40 2014 +0200
     1.3 @@ -1018,6 +1018,7 @@
     1.4                n_copy->set_data((intx) (load_klass()));
     1.5              } else {
     1.6                assert(mirror() != NULL, "klass not set");
     1.7 +              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
     1.8                n_copy->set_data(cast_from_oop<intx>(mirror()));
     1.9              }
    1.10  
     2.1 --- a/src/share/vm/ci/ciEnv.hpp	Tue Jul 01 09:03:55 2014 +0200
     2.2 +++ b/src/share/vm/ci/ciEnv.hpp	Mon Jul 07 10:12:40 2014 +0200
     2.3 @@ -184,6 +184,10 @@
     2.4      }
     2.5    }
     2.6  
     2.7 +  void ensure_metadata_alive(ciMetadata* m) {
     2.8 +    _factory->ensure_metadata_alive(m);
     2.9 +  }
    2.10 +
    2.11    ciInstance* get_instance(oop o) {
    2.12      if (o == NULL) return NULL;
    2.13      return get_object(o)->as_instance();
     3.1 --- a/src/share/vm/ci/ciKlass.hpp	Tue Jul 01 09:03:55 2014 +0200
     3.2 +++ b/src/share/vm/ci/ciKlass.hpp	Mon Jul 07 10:12:40 2014 +0200
     3.3 @@ -43,6 +43,7 @@
     3.4    friend class ciMethod;
     3.5    friend class ciMethodData;
     3.6    friend class ciObjArrayKlass;
     3.7 +  friend class ciReceiverTypeData;
     3.8  
     3.9  private:
    3.10    ciSymbol* _name;
     4.1 --- a/src/share/vm/ci/ciMethodData.cpp	Tue Jul 01 09:03:55 2014 +0200
     4.2 +++ b/src/share/vm/ci/ciMethodData.cpp	Mon Jul 07 10:12:40 2014 +0200
     4.3 @@ -170,6 +170,7 @@
     4.4      Klass* k = data->as_ReceiverTypeData()->receiver(row);
     4.5      if (k != NULL) {
     4.6        ciKlass* klass = CURRENT_ENV->get_klass(k);
     4.7 +      CURRENT_ENV->ensure_metadata_alive(klass);
     4.8        set_receiver(row, klass);
     4.9      }
    4.10    }
    4.11 @@ -191,6 +192,7 @@
    4.12  void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
    4.13    Method* m = data->as_SpeculativeTrapData()->method();
    4.14    ciMethod* ci_m = CURRENT_ENV->get_method(m);
    4.15 +  CURRENT_ENV->ensure_metadata_alive(ci_m);
    4.16    set_method(ci_m);
    4.17  }
    4.18  
     5.1 --- a/src/share/vm/ci/ciMethodData.hpp	Tue Jul 01 09:03:55 2014 +0200
     5.2 +++ b/src/share/vm/ci/ciMethodData.hpp	Mon Jul 07 10:12:40 2014 +0200
     5.3 @@ -70,6 +70,7 @@
     5.4      Klass* v = TypeEntries::valid_klass(k);
     5.5      if (v != NULL) {
     5.6        ciKlass* klass = CURRENT_ENV->get_klass(v);
     5.7 +      CURRENT_ENV->ensure_metadata_alive(klass);
     5.8        return with_status(klass, k);
     5.9      }
    5.10      return with_status(NULL, k);
     6.1 --- a/src/share/vm/ci/ciObjectFactory.cpp	Tue Jul 01 09:03:55 2014 +0200
     6.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp	Mon Jul 07 10:12:40 2014 +0200
     6.3 @@ -46,6 +46,9 @@
     6.4  #include "oops/oop.inline.hpp"
     6.5  #include "oops/oop.inline2.hpp"
     6.6  #include "runtime/fieldType.hpp"
     6.7 +#if INCLUDE_ALL_GCS
     6.8 +# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
     6.9 +#endif
    6.10  
    6.11  // ciObjectFactory
    6.12  //
    6.13 @@ -374,6 +377,37 @@
    6.14    return NULL;
    6.15  }
    6.16  
    6.17 +// ------------------------------------------------------------------
    6.18 +// ciObjectFactory::ensure_metadata_alive
    6.19 +//
    6.20 +// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
    6.21 +// This is primarily useful for metadata which is considered as weak roots
    6.22 +// by the GC but need to be strong roots if reachable from a current compilation.
    6.23 +//
    6.24 +void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
    6.25 +  ASSERT_IN_VM; // We're handling raw oops here.
    6.26 +
    6.27 +#if INCLUDE_ALL_GCS
    6.28 +  if (!UseG1GC) {
    6.29 +    return;
    6.30 +  }
    6.31 +  Klass* metadata_owner_klass;
    6.32 +  if (m->is_klass()) {
    6.33 +    metadata_owner_klass = m->as_klass()->get_Klass();
    6.34 +  } else if (m->is_method()) {
    6.35 +    metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
    6.36 +  } else {
    6.37 +    fatal("Not implemented for other types of metadata");
    6.38 +  }
    6.39 +
    6.40 +  oop metadata_holder = metadata_owner_klass->klass_holder();
    6.41 +  if (metadata_holder != NULL) {
    6.42 +    G1SATBCardTableModRefBS::enqueue(metadata_holder);
    6.43 +  }
    6.44 +
    6.45 +#endif
    6.46 +}
    6.47 +
    6.48  //------------------------------------------------------------------
    6.49  // ciObjectFactory::get_unloaded_method
    6.50  //
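
The ensure_metadata_alive() hook added to ciObjectFactory above is the compiler-side half of the new scheme: G1 treats metadata reached only through profiling data as weak roots, so when a compilation picks such a Klass or Method up it re-announces the owning class loader's holder oop to the concurrent marker through the SATB queue. A standalone sketch of that idea in plain C++, with illustrative stand-in types (a model of the pattern, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct HeapObject { bool marked_live; };          // stand-in for the class loader holder oop
    struct Metadata   { HeapObject* holder; };        // stand-in for a Klass/Method

    // Stand-in for G1's SATB queue: anything enqueued here is treated as
    // live by the in-progress concurrent mark once the queue is drained.
    static std::vector<HeapObject*> satb_queue;

    static void ensure_metadata_alive(Metadata* m) {
      if (m != nullptr && m->holder != nullptr) {
        satb_queue.push_back(m->holder);              // analogous to G1SATBCardTableModRefBS::enqueue
      }
    }

    int main() {
      HeapObject loader{false};
      Metadata prof_klass{&loader};                   // metadata found in a profiling cell
      ensure_metadata_alive(&prof_klass);             // the compiler is about to embed it
      for (HeapObject* obj : satb_queue) obj->marked_live = true;  // marker drains the queue
      std::printf("holder kept live: %d\n", loader.marked_live);
      return 0;
    }
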
     7.1 --- a/src/share/vm/ci/ciObjectFactory.hpp	Tue Jul 01 09:03:55 2014 +0200
     7.2 +++ b/src/share/vm/ci/ciObjectFactory.hpp	Mon Jul 07 10:12:40 2014 +0200
     7.3 @@ -75,6 +75,8 @@
     7.4    ciObject* create_new_object(oop o);
     7.5    ciMetadata* create_new_object(Metadata* o);
     7.6  
     7.7 +  void ensure_metadata_alive(ciMetadata* m);
     7.8 +
     7.9    static bool is_equal(NonPermObject* p, oop key) {
    7.10      return p->object()->get_oop() == key;
    7.11    }
     8.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Tue Jul 01 09:03:55 2014 +0200
     8.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Mon Jul 07 10:12:40 2014 +0200
     8.3 @@ -321,6 +321,27 @@
     8.4    }
     8.5  }
     8.6  
     8.7 +#ifdef ASSERT
     8.8 +class AllAliveClosure : public OopClosure {
     8.9 +  BoolObjectClosure* _is_alive_closure;
    8.10 +  bool _found_dead;
    8.11 + public:
    8.12 +  AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
    8.13 +  template <typename T> void do_oop_work(T* p) {
    8.14 +    T heap_oop = oopDesc::load_heap_oop(p);
    8.15 +    if (!oopDesc::is_null(heap_oop)) {
    8.16 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    8.17 +      if (!_is_alive_closure->do_object_b(obj)) {
    8.18 +        _found_dead = true;
    8.19 +      }
    8.20 +    }
    8.21 +  }
    8.22 +  void do_oop(oop* p)       { do_oop_work<oop>(p); }
    8.23 +  void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
    8.24 +  bool found_dead()         { return _found_dead; }
    8.25 +};
    8.26 +#endif
    8.27 +
    8.28  oop ClassLoaderData::keep_alive_object() const {
    8.29    assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
    8.30    return is_anonymous() ? _klasses->java_mirror() : class_loader();
    8.31 @@ -330,7 +351,15 @@
    8.32    bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
    8.33        || is_alive_closure->do_object_b(keep_alive_object());
    8.34  
    8.35 -  assert(!alive || claimed(), "must be claimed");
    8.36 +#ifdef ASSERT
    8.37 +  if (alive) {
    8.38 +    AllAliveClosure all_alive_closure(is_alive_closure);
    8.39 +    KlassToOopClosure klass_closure(&all_alive_closure);
    8.40 +    const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
    8.41 +    assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
    8.42 +  }
    8.43 +#endif
    8.44 +
    8.45    return alive;
    8.46  }
    8.47  
    8.48 @@ -609,9 +638,36 @@
    8.49  
    8.50  void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
    8.51    if (ClassUnloading) {
    8.52 -    ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
    8.53 +    keep_alive_oops_do(f, klass_closure, must_claim);
    8.54    } else {
    8.55 -    ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
    8.56 +    oops_do(f, klass_closure, must_claim);
    8.57 +  }
    8.58 +}
    8.59 +
    8.60 +void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
    8.61 +  for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
    8.62 +    cl->do_cld(cld);
    8.63 +  }
    8.64 +}
    8.65 +
    8.66 +void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
    8.67 +  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
    8.68 +    CLDClosure* closure = cld->keep_alive() ? strong : weak;
    8.69 +    if (closure != NULL) {
    8.70 +      closure->do_cld(cld);
    8.71 +    }
    8.72 +  }
    8.73 +}
    8.74 +
    8.75 +void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
    8.76 +  roots_cld_do(cl, NULL);
    8.77 +}
    8.78 +
    8.79 +void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
    8.80 +  if (ClassUnloading) {
    8.81 +    keep_alive_cld_do(cl);
    8.82 +  } else {
    8.83 +    cld_do(cl);
    8.84    }
    8.85  }
    8.86  
    8.87 @@ -666,6 +722,16 @@
    8.88    return array;
    8.89  }
    8.90  
    8.91 +bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
    8.92 +  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
    8.93 +  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
    8.94 +    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
    8.95 +      return true;
    8.96 +    }
    8.97 +  }
    8.98 +  return false;
    8.99 +}
   8.100 +
   8.101  #ifndef PRODUCT
   8.102  bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   8.103    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
   8.104 @@ -786,6 +852,60 @@
   8.105    return _rw_metaspace;
   8.106  }
   8.107  
   8.108 +ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
   8.109 +    : _next_klass(NULL) {
   8.110 +  ClassLoaderData* cld = ClassLoaderDataGraph::_head;
   8.111 +  Klass* klass = NULL;
   8.112 +
   8.113 +  // Find the first klass in the CLDG.
   8.114 +  while (cld != NULL) {
   8.115 +    klass = cld->_klasses;
   8.116 +    if (klass != NULL) {
   8.117 +      _next_klass = klass;
   8.118 +      return;
   8.119 +    }
   8.120 +    cld = cld->next();
   8.121 +  }
   8.122 +}
   8.123 +
   8.124 +Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
   8.125 +  Klass* next = klass->next_link();
   8.126 +  if (next != NULL) {
   8.127 +    return next;
   8.128 +  }
   8.129 +
   8.130 +  // No more klasses in the current CLD. Time to find a new CLD.
   8.131 +  ClassLoaderData* cld = klass->class_loader_data();
   8.132 +  while (next == NULL) {
   8.133 +    cld = cld->next();
   8.134 +    if (cld == NULL) {
   8.135 +      break;
   8.136 +    }
   8.137 +    next = cld->_klasses;
   8.138 +  }
   8.139 +
   8.140 +  return next;
   8.141 +}
   8.142 +
   8.143 +Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
   8.144 +  Klass* head = (Klass*)_next_klass;
   8.145 +
   8.146 +  while (head != NULL) {
   8.147 +    Klass* next = next_klass_in_cldg(head);
   8.148 +
   8.149 +    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
   8.150 +
   8.151 +    if (old_head == head) {
   8.152 +      return head; // Won the CAS.
   8.153 +    }
   8.154 +
   8.155 +    head = old_head;
   8.156 +  }
   8.157 +
   8.158 +  // Nothing more for the iterator to hand out.
   8.159 +  assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
   8.160 +  return NULL;
   8.161 +}
   8.162  
   8.163  ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
   8.164    _data = ClassLoaderDataGraph::_head;
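
The ClassLoaderDataGraphKlassIteratorAtomic added above lets parallel GC workers hand out klasses from the CLD graph without a lock: each worker tries to CAS the shared cursor from the current head to its successor and keeps the node it managed to claim. A minimal standalone model of that claiming loop, using std::atomic in place of Atomic::cmpxchg_ptr and a plain linked list in place of the Klass chains (illustrative only):

    #include <atomic>
    #include <cstdio>

    struct Node { int id; Node* next; };

    class ClaimIterator {
      std::atomic<Node*> _next;
     public:
      explicit ClaimIterator(Node* head) : _next(head) {}

      // Returns a node no other thread will receive, or nullptr when the list is exhausted.
      Node* claim() {
        Node* head = _next.load();
        while (head != nullptr) {
          Node* next = head->next;
          // Try to advance the shared cursor; on failure 'head' is refreshed to the
          // value another thread installed and we retry, mirroring next_klass().
          if (_next.compare_exchange_strong(head, next)) {
            return head;
          }
        }
        return nullptr;
      }
    };

    int main() {
      Node c{3, nullptr}, b{2, &c}, a{1, &b};
      ClaimIterator it(&a);
      for (Node* n = it.claim(); n != nullptr; n = it.claim()) {
        std::printf("claimed klass %d\n", n->id);
      }
      return 0;
    }
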
     9.1 --- a/src/share/vm/classfile/classLoaderData.hpp	Tue Jul 01 09:03:55 2014 +0200
     9.2 +++ b/src/share/vm/classfile/classLoaderData.hpp	Mon Jul 07 10:12:40 2014 +0200
     9.3 @@ -31,7 +31,6 @@
     9.4  #include "memory/metaspaceCounters.hpp"
     9.5  #include "runtime/mutex.hpp"
     9.6  #include "utilities/growableArray.hpp"
     9.7 -
     9.8  #if INCLUDE_TRACE
     9.9  # include "utilities/ticks.hpp"
    9.10  #endif
    9.11 @@ -59,6 +58,7 @@
    9.12  class ClassLoaderDataGraph : public AllStatic {
    9.13    friend class ClassLoaderData;
    9.14    friend class ClassLoaderDataGraphMetaspaceIterator;
    9.15 +  friend class ClassLoaderDataGraphKlassIteratorAtomic;
    9.16    friend class VMStructs;
    9.17   private:
    9.18    // All CLDs (except the null CLD) can be reached by walking _head->_next->...
    9.19 @@ -75,9 +75,16 @@
    9.20    static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
    9.21    static void purge();
    9.22    static void clear_claimed_marks();
    9.23 +  // oops do
    9.24    static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
    9.25 +  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
    9.26    static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
    9.27 -  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
    9.28 +  // cld do
    9.29 +  static void cld_do(CLDClosure* cl);
    9.30 +  static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
    9.31 +  static void keep_alive_cld_do(CLDClosure* cl);
    9.32 +  static void always_strong_cld_do(CLDClosure* cl);
    9.33 +  // klass do
    9.34    static void classes_do(KlassClosure* klass_closure);
    9.35    static void classes_do(void f(Klass* const));
    9.36    static void loaded_classes_do(KlassClosure* klass_closure);
    9.37 @@ -102,6 +109,7 @@
    9.38    static void dump() { dump_on(tty); }
    9.39    static void verify();
    9.40  
    9.41 +  static bool unload_list_contains(const void* x);
    9.42  #ifndef PRODUCT
    9.43    static bool contains_loader_data(ClassLoaderData* loader_data);
    9.44  #endif
    9.45 @@ -134,6 +142,7 @@
    9.46    };
    9.47  
    9.48    friend class ClassLoaderDataGraph;
    9.49 +  friend class ClassLoaderDataGraphKlassIteratorAtomic;
    9.50    friend class ClassLoaderDataGraphMetaspaceIterator;
    9.51    friend class MetaDataFactory;
    9.52    friend class Method;
    9.53 @@ -195,7 +204,6 @@
    9.54  
    9.55    void unload();
    9.56    bool keep_alive() const       { return _keep_alive; }
    9.57 -  bool is_alive(BoolObjectClosure* is_alive_closure) const;
    9.58    void classes_do(void f(Klass*));
    9.59    void loaded_classes_do(KlassClosure* klass_closure);
    9.60    void classes_do(void f(InstanceKlass*));
    9.61 @@ -207,6 +215,9 @@
    9.62    MetaWord* allocate(size_t size);
    9.63  
    9.64   public:
    9.65 +
    9.66 +  bool is_alive(BoolObjectClosure* is_alive_closure) const;
    9.67 +
    9.68    // Accessors
    9.69    Metaspace* metaspace_or_null() const     { return _metaspace; }
    9.70  
    9.71 @@ -290,6 +301,16 @@
    9.72    void initialize_shared_metaspaces();
    9.73  };
    9.74  
    9.75 +// An iterator that distributes Klasses to parallel worker threads.
    9.76 +class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
    9.77 +  volatile Klass* _next_klass;
    9.78 + public:
    9.79 +  ClassLoaderDataGraphKlassIteratorAtomic();
    9.80 +  Klass* next_klass();
    9.81 + private:
    9.82 +  static Klass* next_klass_in_cldg(Klass* klass);
    9.83 +};
    9.84 +
    9.85  class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
    9.86    ClassLoaderData* _data;
    9.87   public:
    10.1 --- a/src/share/vm/classfile/dictionary.cpp	Tue Jul 01 09:03:55 2014 +0200
    10.2 +++ b/src/share/vm/classfile/dictionary.cpp	Mon Jul 07 10:12:40 2014 +0200
    10.3 @@ -199,6 +199,26 @@
    10.4    return class_was_unloaded;
    10.5  }
    10.6  
    10.7 +void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
    10.8 +  // Skip the strong roots probe marking if the closures are the same.
    10.9 +  if (strong == weak) {
   10.10 +    oops_do(strong);
   10.11 +    return;
   10.12 +  }
   10.13 +
   10.14 +  for (int index = 0; index < table_size(); index++) {
   10.15 +    for (DictionaryEntry *probe = bucket(index);
   10.16 +                          probe != NULL;
   10.17 +                          probe = probe->next()) {
   10.18 +      Klass* e = probe->klass();
   10.19 +      ClassLoaderData* loader_data = probe->loader_data();
   10.20 +      if (is_strongly_reachable(loader_data, e)) {
   10.21 +        probe->set_strongly_reachable();
   10.22 +      }
   10.23 +    }
   10.24 +  }
   10.25 +  _pd_cache_table->roots_oops_do(strong, weak);
   10.26 +}
   10.27  
   10.28  void Dictionary::always_strong_oops_do(OopClosure* blk) {
   10.29    // Follow all system classes and temporary placeholders in dictionary; only
   10.30 @@ -490,6 +510,23 @@
   10.31    }
   10.32  }
   10.33  
   10.34 +void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
   10.35 +  for (int index = 0; index < table_size(); index++) {
   10.36 +    for (ProtectionDomainCacheEntry* probe = bucket(index);
   10.37 +                                     probe != NULL;
   10.38 +                                     probe = probe->next()) {
   10.39 +      if (probe->is_strongly_reachable()) {
   10.40 +        probe->reset_strongly_reachable();
   10.41 +        probe->oops_do(strong);
   10.42 +      } else {
   10.43 +        if (weak != NULL) {
   10.44 +          probe->oops_do(weak);
   10.45 +        }
   10.46 +      }
   10.47 +    }
   10.48 +  }
   10.49 +}
   10.50 +
   10.51  uint ProtectionDomainCacheTable::bucket_size() {
   10.52    return sizeof(ProtectionDomainCacheEntry);
   10.53  }
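
The roots_oops_do() methods added above split the dictionary roots between two closures: entries flagged as strongly reachable get the strong closure, everything else goes to the weak closure, and passing NULL for the weak closure collapses back to the old always-strong traversal. A small standalone model of that dispatch (types and names are illustrative, not the HotSpot ones):

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct Entry {
      const char* name;
      bool strongly_reachable;
    };

    using OopClosure = std::function<void(const Entry&)>;

    static void roots_do(std::vector<Entry>& table,
                         const OopClosure& strong,
                         const OopClosure* weak) {
      for (Entry& e : table) {
        if (e.strongly_reachable) {
          e.strongly_reachable = false;   // reset the probe mark, as the PD cache table does
          strong(e);
        } else if (weak != nullptr) {     // weak may be omitted for an always-strong walk
          (*weak)(e);
        }
      }
    }

    int main() {
      std::vector<Entry> table = {{"java/lang/Object", true}, {"com/app/Plugin", false}};
      OopClosure strong = [](const Entry& e) { std::printf("strong root: %s\n", e.name); };
      OopClosure weak   = [](const Entry& e) { std::printf("weak root:   %s\n", e.name); };
      roots_do(table, strong, &weak);
      return 0;
    }
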
    11.1 --- a/src/share/vm/classfile/dictionary.hpp	Tue Jul 01 09:03:55 2014 +0200
    11.2 +++ b/src/share/vm/classfile/dictionary.hpp	Mon Jul 07 10:12:40 2014 +0200
    11.3 @@ -89,6 +89,7 @@
    11.4    // GC support
    11.5    void oops_do(OopClosure* f);
    11.6    void always_strong_oops_do(OopClosure* blk);
    11.7 +  void roots_oops_do(OopClosure* strong, OopClosure* weak);
    11.8  
    11.9    void always_strong_classes_do(KlassClosure* closure);
   11.10  
   11.11 @@ -218,6 +219,7 @@
   11.12    // GC support
   11.13    void oops_do(OopClosure* f);
   11.14    void always_strong_oops_do(OopClosure* f);
   11.15 +  void roots_oops_do(OopClosure* strong, OopClosure* weak);
   11.16  
   11.17    static uint bucket_size();
   11.18  
    12.1 --- a/src/share/vm/classfile/metadataOnStackMark.cpp	Tue Jul 01 09:03:55 2014 +0200
    12.2 +++ b/src/share/vm/classfile/metadataOnStackMark.cpp	Mon Jul 07 10:12:40 2014 +0200
    12.3 @@ -47,8 +47,11 @@
    12.4    if (_marked_objects == NULL) {
    12.5      _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
    12.6    }
    12.7 +
    12.8    Threads::metadata_do(Metadata::mark_on_stack);
    12.9 -  CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   12.10 +  if (JvmtiExport::has_redefined_a_class()) {
   12.11 +    CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   12.12 +  }
   12.13    CompileBroker::mark_on_stack();
   12.14    JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
   12.15    ThreadService::metadata_do(Metadata::mark_on_stack);
    13.1 --- a/src/share/vm/classfile/symbolTable.cpp	Tue Jul 01 09:03:55 2014 +0200
    13.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Mon Jul 07 10:12:40 2014 +0200
    13.3 @@ -36,6 +36,7 @@
    13.4  #include "runtime/mutexLocker.hpp"
    13.5  #include "utilities/hashtable.inline.hpp"
    13.6  #if INCLUDE_ALL_GCS
    13.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    13.8  #include "gc_implementation/g1/g1StringDedup.hpp"
    13.9  #endif
   13.10  
   13.11 @@ -704,11 +705,26 @@
   13.12    return lookup(chars, length);
   13.13  }
   13.14  
   13.15 +// Tell the GC that this string was looked up in the StringTable.
   13.16 +static void ensure_string_alive(oop string) {
   13.17 +  // A lookup in the StringTable could return an object that was previously
   13.18 +  // considered dead. The SATB part of G1 needs to get notified about this
   13.19 +  // potential resurrection, otherwise the marking might not find the object.
   13.20 +#if INCLUDE_ALL_GCS
   13.21 +  if (UseG1GC && string != NULL) {
   13.22 +    G1SATBCardTableModRefBS::enqueue(string);
   13.23 +  }
   13.24 +#endif
   13.25 +}
   13.26  
   13.27  oop StringTable::lookup(jchar* name, int len) {
   13.28    unsigned int hash = hash_string(name, len);
   13.29    int index = the_table()->hash_to_index(hash);
   13.30 -  return the_table()->lookup(index, name, len, hash);
   13.31 +  oop string = the_table()->lookup(index, name, len, hash);
   13.32 +
   13.33 +  ensure_string_alive(string);
   13.34 +
   13.35 +  return string;
   13.36  }
   13.37  
   13.38  
   13.39 @@ -719,7 +735,10 @@
   13.40    oop found_string = the_table()->lookup(index, name, len, hashValue);
   13.41  
   13.42    // Found
   13.43 -  if (found_string != NULL) return found_string;
   13.44 +  if (found_string != NULL) {
   13.45 +    ensure_string_alive(found_string);
   13.46 +    return found_string;
   13.47 +  }
   13.48  
   13.49    debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
   13.50    assert(!Universe::heap()->is_in_reserved(name),
   13.51 @@ -744,11 +763,17 @@
   13.52  
   13.53    // Grab the StringTable_lock before getting the_table() because it could
   13.54    // change at safepoint.
   13.55 -  MutexLocker ml(StringTable_lock, THREAD);
   13.56 +  oop added_or_found;
   13.57 +  {
   13.58 +    MutexLocker ml(StringTable_lock, THREAD);
   13.59 +    // Otherwise, add to symbol to table
   13.60 +    added_or_found = the_table()->basic_add(index, string, name, len,
   13.61 +                                  hashValue, CHECK_NULL);
   13.62 +  }
   13.63  
   13.64 -  // Otherwise, add to symbol to table
   13.65 -  return the_table()->basic_add(index, string, name, len,
   13.66 -                                hashValue, CHECK_NULL);
   13.67 +  ensure_string_alive(added_or_found);
   13.68 +
   13.69 +  return added_or_found;
   13.70  }
   13.71  
   13.72  oop StringTable::intern(Symbol* symbol, TRAPS) {
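
The ensure_string_alive() calls added above address a resurrection hazard: an interned string may be unreachable in G1's marking snapshot, and handing it out from the StringTable without notifying the marker would let a now-reachable object be treated as dead. A standalone sketch of the hazard and the SATB-style fix (an illustrative model, not HotSpot code):

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct StringObj { std::string value; bool marked; };

    static std::unordered_map<std::string, StringObj*> string_table;  // weakly referenced entries
    static std::vector<StringObj*> satb_queue;                        // drained by the concurrent marker

    static StringObj* lookup(const std::string& s) {
      auto it = string_table.find(s);
      if (it == string_table.end()) return nullptr;
      satb_queue.push_back(it->second);   // the "ensure_string_alive" barrier
      return it->second;
    }

    int main() {
      StringObj* interned = new StringObj{"hello", false};
      string_table["hello"] = interned;            // only reference is the weak table

      StringObj* s = lookup("hello");              // mutator resurrects it mid-mark
      for (StringObj* obj : satb_queue) obj->marked = true;
      std::printf("marked: %d\n", s->marked);      // without the barrier the marker could miss it
      delete interned;
      return 0;
    }
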
    14.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Tue Jul 01 09:03:55 2014 +0200
    14.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Mon Jul 07 10:12:40 2014 +0200
    14.3 @@ -1613,13 +1613,7 @@
    14.4  // system dictionary and follows the remaining classes' contents.
    14.5  
    14.6  void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
    14.7 -  blk->do_oop(&_java_system_loader);
    14.8 -  blk->do_oop(&_system_loader_lock_obj);
    14.9 -
   14.10 -  dictionary()->always_strong_oops_do(blk);
   14.11 -
   14.12 -  // Visit extra methods
   14.13 -  invoke_method_table()->oops_do(blk);
   14.14 +  roots_oops_do(blk, NULL);
   14.15  }
   14.16  
   14.17  void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
   14.18 @@ -1686,6 +1680,17 @@
   14.19    return unloading_occurred;
   14.20  }
   14.21  
   14.22 +void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
   14.23 +  strong->do_oop(&_java_system_loader);
   14.24 +  strong->do_oop(&_system_loader_lock_obj);
   14.25 +
   14.26 +  // Adjust dictionary
   14.27 +  dictionary()->roots_oops_do(strong, weak);
   14.28 +
   14.29 +  // Visit extra methods
   14.30 +  invoke_method_table()->oops_do(strong);
   14.31 +}
   14.32 +
   14.33  void SystemDictionary::oops_do(OopClosure* f) {
   14.34    f->do_oop(&_java_system_loader);
   14.35    f->do_oop(&_system_loader_lock_obj);
    15.1 --- a/src/share/vm/classfile/systemDictionary.hpp	Tue Jul 01 09:03:55 2014 +0200
    15.2 +++ b/src/share/vm/classfile/systemDictionary.hpp	Mon Jul 07 10:12:40 2014 +0200
    15.3 @@ -335,6 +335,7 @@
    15.4  
    15.5    // Applies "f->do_oop" to all root oops in the system dictionary.
    15.6    static void oops_do(OopClosure* f);
    15.7 +  static void roots_oops_do(OopClosure* strong, OopClosure* weak);
    15.8  
    15.9    // System loader lock
   15.10    static oop system_loader_lock()           { return _system_loader_lock_obj; }
    16.1 --- a/src/share/vm/code/codeCache.cpp	Tue Jul 01 09:03:55 2014 +0200
    16.2 +++ b/src/share/vm/code/codeCache.cpp	Mon Jul 07 10:12:40 2014 +0200
    16.3 @@ -337,6 +337,11 @@
    16.4  // Walk the list of methods which might contain non-perm oops.
    16.5  void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
    16.6    assert_locked_or_safepoint(CodeCache_lock);
    16.7 +
    16.8 +  if (UseG1GC) {
    16.9 +    return;
   16.10 +  }
   16.11 +
   16.12    debug_only(mark_scavenge_root_nmethods());
   16.13  
   16.14    for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
   16.15 @@ -362,6 +367,11 @@
   16.16  
   16.17  void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   16.18    assert_locked_or_safepoint(CodeCache_lock);
   16.19 +
   16.20 +  if (UseG1GC) {
   16.21 +    return;
   16.22 +  }
   16.23 +
   16.24    nm->set_on_scavenge_root_list();
   16.25    nm->set_scavenge_root_link(_scavenge_root_nmethods);
   16.26    set_scavenge_root_nmethods(nm);
   16.27 @@ -370,6 +380,11 @@
   16.28  
   16.29  void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   16.30    assert_locked_or_safepoint(CodeCache_lock);
   16.31 +
   16.32 +  if (UseG1GC) {
   16.33 +    return;
   16.34 +  }
   16.35 +
   16.36    print_trace("drop_scavenge_root", nm);
   16.37    nmethod* last = NULL;
   16.38    nmethod* cur = scavenge_root_nmethods();
   16.39 @@ -391,6 +406,11 @@
   16.40  
   16.41  void CodeCache::prune_scavenge_root_nmethods() {
   16.42    assert_locked_or_safepoint(CodeCache_lock);
   16.43 +
   16.44 +  if (UseG1GC) {
   16.45 +    return;
   16.46 +  }
   16.47 +
   16.48    debug_only(mark_scavenge_root_nmethods());
   16.49  
   16.50    nmethod* last = NULL;
   16.51 @@ -423,6 +443,10 @@
   16.52  
   16.53  #ifndef PRODUCT
   16.54  void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
   16.55 +  if (UseG1GC) {
   16.56 +    return;
   16.57 +  }
   16.58 +
   16.59    // While we are here, verify the integrity of the list.
   16.60    mark_scavenge_root_nmethods();
   16.61    for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
   16.62 @@ -463,9 +487,36 @@
   16.63  }
   16.64  #endif //PRODUCT
   16.65  
   16.66 +void CodeCache::verify_clean_inline_caches() {
   16.67 +#ifdef ASSERT
   16.68 +  FOR_ALL_ALIVE_BLOBS(cb) {
   16.69 +    if (cb->is_nmethod()) {
   16.70 +      nmethod* nm = (nmethod*)cb;
   16.71 +      assert(!nm->is_unloaded(), "Tautology");
   16.72 +      nm->verify_clean_inline_caches();
   16.73 +      nm->verify();
   16.74 +    }
   16.75 +  }
   16.76 +#endif
   16.77 +}
   16.78 +
   16.79 +void CodeCache::verify_icholder_relocations() {
   16.80 +#ifdef ASSERT
   16.81 +  // make sure that we aren't leaking icholders
   16.82 +  int count = 0;
   16.83 +  FOR_ALL_BLOBS(cb) {
   16.84 +    if (cb->is_nmethod()) {
   16.85 +      nmethod* nm = (nmethod*)cb;
   16.86 +      count += nm->verify_icholder_relocations();
   16.87 +    }
   16.88 +  }
   16.89 +
   16.90 +  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
   16.91 +         CompiledICHolder::live_count(), "must agree");
   16.92 +#endif
   16.93 +}
   16.94  
   16.95  void CodeCache::gc_prologue() {
   16.96 -  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
   16.97  }
   16.98  
   16.99  void CodeCache::gc_epilogue() {
  16.100 @@ -478,41 +529,15 @@
  16.101          nm->cleanup_inline_caches();
  16.102        }
  16.103        DEBUG_ONLY(nm->verify());
  16.104 -      nm->fix_oop_relocations();
  16.105 +      DEBUG_ONLY(nm->verify_oop_relocations());
  16.106      }
  16.107    }
  16.108    set_needs_cache_clean(false);
  16.109    prune_scavenge_root_nmethods();
  16.110 -  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  16.111  
  16.112 -#ifdef ASSERT
  16.113 -  // make sure that we aren't leaking icholders
  16.114 -  int count = 0;
  16.115 -  FOR_ALL_BLOBS(cb) {
  16.116 -    if (cb->is_nmethod()) {
  16.117 -      RelocIterator iter((nmethod*)cb);
  16.118 -      while(iter.next()) {
  16.119 -        if (iter.type() == relocInfo::virtual_call_type) {
  16.120 -          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
  16.121 -            CompiledIC *ic = CompiledIC_at(&iter);
  16.122 -            if (TraceCompiledIC) {
  16.123 -              tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
  16.124 -              ic->print();
  16.125 -            }
  16.126 -            assert(ic->cached_icholder() != NULL, "must be non-NULL");
  16.127 -            count++;
  16.128 -          }
  16.129 -        }
  16.130 -      }
  16.131 -    }
  16.132 -  }
  16.133 -
  16.134 -  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
  16.135 -         CompiledICHolder::live_count(), "must agree");
  16.136 -#endif
  16.137 +  verify_icholder_relocations();
  16.138  }
  16.139  
  16.140 -
  16.141  void CodeCache::verify_oops() {
  16.142    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  16.143    VerifyOopClosure voc;
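
The UseG1GC early returns added to the scavenge-root bookkeeping above reflect that G1 no longer uses the single global scavenge_root_nmethods list with this changeset; it tracks nmethod roots per heap region instead (g1CodeCacheRemSet.* in the file list, and the Universe::heap()->unregister_nmethod() call added to nmethod::make_unloaded()). A rough standalone model of per-region bookkeeping, with purely illustrative names:

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Nmethod { int id; };

    // One code-root set per heap region: a stand-in for the region-level
    // strong code roots G1 maintains instead of a global list.
    static std::vector<std::set<Nmethod*>> region_code_roots(4);

    static void register_nmethod(int region, Nmethod* nm)   { region_code_roots[region].insert(nm); }
    static void unregister_nmethod(int region, Nmethod* nm) { region_code_roots[region].erase(nm); }

    int main() {
      Nmethod nm{42};
      register_nmethod(1, &nm);      // compiled code holds oops into region 1
      // When region 1 is evacuated, only that region's code roots need scanning:
      for (Nmethod* root : region_code_roots[1]) std::printf("scan nmethod %d\n", root->id);
      unregister_nmethod(1, &nm);    // e.g. when the nmethod is unloaded
      return 0;
    }
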
    17.1 --- a/src/share/vm/code/codeCache.hpp	Tue Jul 01 09:03:55 2014 +0200
    17.2 +++ b/src/share/vm/code/codeCache.hpp	Mon Jul 07 10:12:40 2014 +0200
    17.3 @@ -134,10 +134,6 @@
    17.4    // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
    17.5    // to "true" iff some code got unloaded.
    17.6    static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
    17.7 -  static void oops_do(OopClosure* f) {
    17.8 -    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
    17.9 -    blobs_do(&oopc);
   17.10 -  }
   17.11    static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   17.12    static void scavenge_root_nmethods_do(CodeBlobClosure* f);
   17.13  
   17.14 @@ -172,6 +168,9 @@
   17.15    static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   17.16    static void clear_inline_caches();             // clear all inline caches
   17.17  
   17.18 +  static void verify_clean_inline_caches();
   17.19 +  static void verify_icholder_relocations();
   17.20 +
   17.21    // Deoptimization
   17.22    static int  mark_for_deoptimization(DepChange& changes);
   17.23  #ifdef HOTSWAP
    18.1 --- a/src/share/vm/code/compiledIC.cpp	Tue Jul 01 09:03:55 2014 +0200
    18.2 +++ b/src/share/vm/code/compiledIC.cpp	Mon Jul 07 10:12:40 2014 +0200
    18.3 @@ -99,13 +99,13 @@
    18.4    }
    18.5  
    18.6    {
    18.7 -  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    18.8 +    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
    18.9  #ifdef ASSERT
   18.10 -  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
   18.11 -  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
   18.12 +    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
   18.13 +    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
   18.14  #endif
   18.15 -  _ic_call->set_destination_mt_safe(entry_point);
   18.16 -}
   18.17 +     _ic_call->set_destination_mt_safe(entry_point);
   18.18 +  }
   18.19  
   18.20    if (is_optimized() || is_icstub) {
   18.21      // Optimized call sites don't have a cache value and ICStub call
   18.22 @@ -529,7 +529,7 @@
   18.23  void CompiledStaticCall::set_to_clean() {
   18.24    assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   18.25    // Reset call site
   18.26 -  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
   18.27 +  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
   18.28  #ifdef ASSERT
   18.29    CodeBlob* cb = CodeCache::find_blob_unsafe(this);
   18.30    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
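
The compiledIC.cpp changes above make taking Patching_lock conditional: MutexLockerEx treats a NULL mutex as "lock nothing", so passing NULL when SafepointSynchronize::is_at_safepoint() is true lets GC worker threads patch call sites inside a safepoint, where all Java threads are already stopped. A standalone illustration of that nullable-lock pattern using std::mutex (not the HotSpot classes):

    #include <cstdio>
    #include <mutex>

    class MaybeLocker {                 // RAII guard that accepts a null mutex
      std::mutex* _m;
     public:
      explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
      ~MaybeLocker() { if (_m != nullptr) _m->unlock(); }
    };

    static std::mutex patching_lock;
    static bool at_safepoint = false;   // stand-in for SafepointSynchronize::is_at_safepoint()

    static void patch_call_site(const char* who) {
      MaybeLocker guard(at_safepoint ? nullptr : &patching_lock);
      std::printf("%s patched the call site (took lock: %d)\n", who, !at_safepoint);
    }

    int main() {
      patch_call_site("compiler thread");   // concurrent caller: takes the lock
      at_safepoint = true;
      patch_call_site("GC worker");         // inside a safepoint: skips locking
      return 0;
    }
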
    19.1 --- a/src/share/vm/code/nmethod.cpp	Tue Jul 01 09:03:55 2014 +0200
    19.2 +++ b/src/share/vm/code/nmethod.cpp	Mon Jul 07 10:12:40 2014 +0200
    19.3 @@ -49,6 +49,8 @@
    19.4  
    19.5  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    19.6  
    19.7 +unsigned char nmethod::_global_unloading_clock = 0;
    19.8 +
    19.9  #ifdef DTRACE_ENABLED
   19.10  
   19.11  // Only bother with this argument setup if dtrace is available
   19.12 @@ -466,6 +468,7 @@
   19.13  // Fill in default values for various flag fields
   19.14  void nmethod::init_defaults() {
   19.15    _state                      = in_use;
   19.16 +  _unloading_clock            = 0;
   19.17    _marked_for_reclamation     = 0;
   19.18    _has_flushed_dependencies   = 0;
   19.19    _has_unsafe_access          = 0;
   19.20 @@ -484,7 +487,11 @@
   19.21    _oops_do_mark_link       = NULL;
   19.22    _jmethod_id              = NULL;
   19.23    _osr_link                = NULL;
   19.24 -  _scavenge_root_link      = NULL;
   19.25 +  if (UseG1GC) {
   19.26 +    _unloading_next        = NULL;
   19.27 +  } else {
   19.28 +    _scavenge_root_link    = NULL;
   19.29 +  }
   19.30    _scavenge_root_state     = 0;
   19.31    _compiler                = NULL;
   19.32  #if INCLUDE_RTM_OPT
   19.33 @@ -1190,6 +1197,77 @@
   19.34    }
   19.35  }
   19.36  
   19.37 +void nmethod::verify_clean_inline_caches() {
   19.38 +  assert_locked_or_safepoint(CompiledIC_lock);
   19.39 +
   19.40 +  // If the method is not entrant or zombie then a JMP is plastered over the
   19.41 +  // first few bytes.  If an oop in the old code was there, that oop
   19.42 +  // should not get GC'd.  Skip the first few bytes of oops on
   19.43 +  // not-entrant methods.
   19.44 +  address low_boundary = verified_entry_point();
   19.45 +  if (!is_in_use()) {
   19.46 +    low_boundary += NativeJump::instruction_size;
   19.47 +    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
   19.48 +    // This means that the low_boundary is going to be a little too high.
   19.49 +    // This shouldn't matter, since oops of non-entrant methods are never used.
   19.50 +    // In fact, why are we bothering to look at oops in a non-entrant method??
   19.51 +  }
   19.52 +
   19.53 +  ResourceMark rm;
   19.54 +  RelocIterator iter(this, low_boundary);
   19.55 +  while(iter.next()) {
   19.56 +    switch(iter.type()) {
   19.57 +      case relocInfo::virtual_call_type:
   19.58 +      case relocInfo::opt_virtual_call_type: {
   19.59 +        CompiledIC *ic = CompiledIC_at(&iter);
   19.60 +        // Ok, to lookup references to zombies here
   19.61 +        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
   19.62 +        if( cb != NULL && cb->is_nmethod() ) {
   19.63 +          nmethod* nm = (nmethod*)cb;
   19.64 +          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
   19.65 +          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
   19.66 +            assert(ic->is_clean(), "IC should be clean");
   19.67 +          }
   19.68 +        }
   19.69 +        break;
   19.70 +      }
   19.71 +      case relocInfo::static_call_type: {
   19.72 +        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
   19.73 +        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
   19.74 +        if( cb != NULL && cb->is_nmethod() ) {
   19.75 +          nmethod* nm = (nmethod*)cb;
   19.76 +          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
   19.77 +          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
   19.78 +            assert(csc->is_clean(), "IC should be clean");
   19.79 +          }
   19.80 +        }
   19.81 +        break;
   19.82 +      }
   19.83 +    }
   19.84 +  }
   19.85 +}
   19.86 +
   19.87 +int nmethod::verify_icholder_relocations() {
   19.88 +  int count = 0;
   19.89 +
   19.90 +  RelocIterator iter(this);
   19.91 +  while(iter.next()) {
   19.92 +    if (iter.type() == relocInfo::virtual_call_type) {
   19.93 +      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
   19.94 +        CompiledIC *ic = CompiledIC_at(&iter);
   19.95 +        if (TraceCompiledIC) {
   19.96 +          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
   19.97 +          ic->print();
   19.98 +        }
   19.99 +        assert(ic->cached_icholder() != NULL, "must be non-NULL");
  19.100 +        count++;
  19.101 +      }
  19.102 +    }
  19.103 +  }
  19.104 +
  19.105 +  return count;
  19.106 +}
  19.107 +
  19.108  // This is a private interface with the sweeper.
  19.109  void nmethod::mark_as_seen_on_stack() {
  19.110    assert(is_alive(), "Must be an alive method");
  19.111 @@ -1222,6 +1300,23 @@
  19.112    mdo->inc_decompile_count();
  19.113  }
  19.114  
  19.115 +void nmethod::increase_unloading_clock() {
  19.116 +  _global_unloading_clock++;
  19.117 +  if (_global_unloading_clock == 0) {
  19.118 +    // _nmethods are allocated with _unloading_clock == 0,
  19.119 +    // so 0 is never used as a clock value.
  19.120 +    _global_unloading_clock = 1;
  19.121 +  }
  19.122 +}
  19.123 +
  19.124 +void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  19.125 +  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
  19.126 +}
  19.127 +
  19.128 +unsigned char nmethod::unloading_clock() {
  19.129 +  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
  19.130 +}
  19.131 +
  19.132  void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
  19.133  
  19.134    post_compiled_method_unload();
  19.135 @@ -1267,6 +1362,10 @@
  19.136      // for later on.
  19.137      CodeCache::set_needs_cache_clean(true);
  19.138    }
  19.139 +
  19.140 +  // Unregister must be done before the state change
  19.141 +  Universe::heap()->unregister_nmethod(this);
  19.142 +
  19.143    _state = unloaded;
  19.144  
  19.145    // Log the unloading.
  19.146 @@ -1621,6 +1720,35 @@
  19.147    set_unload_reported();
  19.148  }
  19.149  
  19.150 +void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  19.151 +  if (ic->is_icholder_call()) {
  19.152 +    // The only exception is compiledICHolder oops which may
  19.153 +    // yet be marked below. (We check this further below).
  19.154 +    CompiledICHolder* cichk_oop = ic->cached_icholder();
  19.155 +    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
  19.156 +        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
  19.157 +      return;
  19.158 +    }
  19.159 +  } else {
  19.160 +    Metadata* ic_oop = ic->cached_metadata();
  19.161 +    if (ic_oop != NULL) {
  19.162 +      if (ic_oop->is_klass()) {
  19.163 +        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
  19.164 +          return;
  19.165 +        }
  19.166 +      } else if (ic_oop->is_method()) {
  19.167 +        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
  19.168 +          return;
  19.169 +        }
  19.170 +      } else {
  19.171 +        ShouldNotReachHere();
  19.172 +      }
  19.173 +    }
  19.174 +  }
  19.175 +
  19.176 +  ic->set_to_clean();
  19.177 +}
  19.178 +
  19.179  // This is called at the end of the strong tracing/marking phase of a
  19.180  // GC to unload an nmethod if it contains otherwise unreachable
  19.181  // oops.
  19.182 @@ -1664,31 +1792,7 @@
  19.183      while(iter.next()) {
  19.184        if (iter.type() == relocInfo::virtual_call_type) {
  19.185          CompiledIC *ic = CompiledIC_at(&iter);
  19.186 -        if (ic->is_icholder_call()) {
  19.187 -          // The only exception is compiledICHolder oops which may
  19.188 -          // yet be marked below. (We check this further below).
  19.189 -          CompiledICHolder* cichk_oop = ic->cached_icholder();
  19.190 -          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
  19.191 -              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
  19.192 -            continue;
  19.193 -          }
  19.194 -        } else {
  19.195 -          Metadata* ic_oop = ic->cached_metadata();
  19.196 -          if (ic_oop != NULL) {
  19.197 -            if (ic_oop->is_klass()) {
  19.198 -              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
  19.199 -                continue;
  19.200 -              }
  19.201 -            } else if (ic_oop->is_method()) {
  19.202 -              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
  19.203 -                continue;
  19.204 -              }
  19.205 -            } else {
  19.206 -              ShouldNotReachHere();
  19.207 -            }
  19.208 -          }
  19.209 -        }
  19.210 -        ic->set_to_clean();
  19.211 +        clean_ic_if_metadata_is_dead(ic, is_alive);
  19.212        }
  19.213      }
  19.214    }
  19.215 @@ -1726,6 +1830,175 @@
  19.216    verify_metadata_loaders(low_boundary, is_alive);
  19.217  }
  19.218  
  19.219 +template <class CompiledICorStaticCall>
  19.220 +static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  19.221 +  // Ok, to lookup references to zombies here
  19.222 +  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  19.223 +  if (cb != NULL && cb->is_nmethod()) {
  19.224 +    nmethod* nm = (nmethod*)cb;
  19.225 +
  19.226 +    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
  19.227 +      // The nmethod has not been processed yet.
  19.228 +      return true;
  19.229 +    }
  19.230 +
  19.231 +    // Clean inline caches pointing to both zombie and not_entrant methods
  19.232 +    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
  19.233 +      ic->set_to_clean();
  19.234 +      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
  19.235 +    }
  19.236 +  }
  19.237 +
  19.238 +  return false;
  19.239 +}
  19.240 +
  19.241 +static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
  19.242 +  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
  19.243 +}
  19.244 +
  19.245 +static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
  19.246 +  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
  19.247 +}
  19.248 +
  19.249 +bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  19.250 +  ResourceMark rm;
  19.251 +
  19.252 +  // Make sure the oop's ready to receive visitors
  19.253 +  assert(!is_zombie() && !is_unloaded(),
  19.254 +         "should not call follow on zombie or unloaded nmethod");
  19.255 +
  19.256 +  // If the method is not entrant then a JMP is plastered over the
  19.257 +  // first few bytes.  If an oop in the old code was there, that oop
  19.258 +  // should not get GC'd.  Skip the first few bytes of oops on
  19.259 +  // not-entrant methods.
  19.260 +  address low_boundary = verified_entry_point();
  19.261 +  if (is_not_entrant()) {
  19.262 +    low_boundary += NativeJump::instruction_size;
  19.263 +    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  19.264 +    // (See comment above.)
  19.265 +  }
  19.266 +
  19.267 +  // The RedefineClasses() API can cause the class unloading invariant
  19.268 +  // to no longer be true. See jvmtiExport.hpp for details.
  19.269 +  // Also, leave a debugging breadcrumb in local flag.
  19.270 +  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  19.271 +  if (a_class_was_redefined) {
  19.272 +    // This set of the unloading_occurred flag is done before the
  19.273 +    // call to post_compiled_method_unload() so that the unloading
  19.274 +    // of this nmethod is reported.
  19.275 +    unloading_occurred = true;
  19.276 +  }
  19.277 +
  19.278 +  // Exception cache
  19.279 +  clean_exception_cache(is_alive);
  19.280 +
  19.281 +  bool is_unloaded = false;
  19.282 +  bool postponed = false;
  19.283 +
  19.284 +  RelocIterator iter(this, low_boundary);
  19.285 +  while(iter.next()) {
  19.286 +
  19.287 +    switch (iter.type()) {
  19.288 +
  19.289 +    case relocInfo::virtual_call_type:
  19.290 +      if (unloading_occurred) {
  19.291 +        // If class unloading occurred we first iterate over all inline caches and
  19.292 +        // clear ICs where the cached oop is referring to an unloaded klass or method.
  19.293 +        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
  19.294 +      }
  19.295 +
  19.296 +      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  19.297 +      break;
  19.298 +
  19.299 +    case relocInfo::opt_virtual_call_type:
  19.300 +      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  19.301 +      break;
  19.302 +
  19.303 +    case relocInfo::static_call_type:
  19.304 +      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
  19.305 +      break;
  19.306 +
  19.307 +    case relocInfo::oop_type:
  19.308 +      if (!is_unloaded) {
  19.309 +        // Unload check
  19.310 +        oop_Relocation* r = iter.oop_reloc();
  19.311 +        // Traverse those oops directly embedded in the code.
  19.312 +        // Other oops (oop_index>0) are seen as part of scopes_oops.
  19.313 +        assert(1 == (r->oop_is_immediate()) +
  19.314 +                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  19.315 +              "oop must be found in exactly one place");
  19.316 +        if (r->oop_is_immediate() && r->oop_value() != NULL) {
  19.317 +          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
  19.318 +            is_unloaded = true;
  19.319 +          }
  19.320 +        }
  19.321 +      }
  19.322 +      break;
  19.323 +
  19.324 +    }
  19.325 +  }
  19.326 +
  19.327 +  if (is_unloaded) {
  19.328 +    return postponed;
  19.329 +  }
  19.330 +
  19.331 +  // Scopes
  19.332 +  for (oop* p = oops_begin(); p < oops_end(); p++) {
  19.333 +    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  19.334 +    if (can_unload(is_alive, p, unloading_occurred)) {
  19.335 +      is_unloaded = true;
  19.336 +      break;
  19.337 +    }
  19.338 +  }
  19.339 +
  19.340 +  if (is_unloaded) {
  19.341 +    return postponed;
  19.342 +  }
  19.343 +
  19.344 +  // Ensure that all metadata is still alive
  19.345 +  verify_metadata_loaders(low_boundary, is_alive);
  19.346 +
  19.347 +  return postponed;
  19.348 +}
  19.349 +
  19.350 +void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  19.351 +  ResourceMark rm;
  19.352 +
  19.353 +  // Make sure the oop's ready to receive visitors
  19.354 +  assert(!is_zombie(),
  19.355 +         "should not call follow on zombie nmethod");
  19.356 +
  19.357 +  // If the method is not entrant then a JMP is plastered over the
  19.358 +  // first few bytes.  If an oop in the old code was there, that oop
  19.359 +  // should not get GC'd.  Skip the first few bytes of oops on
  19.360 +  // not-entrant methods.
  19.361 +  address low_boundary = verified_entry_point();
  19.362 +  if (is_not_entrant()) {
  19.363 +    low_boundary += NativeJump::instruction_size;
  19.364 +    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  19.365 +    // (See comment above.)
  19.366 +  }
  19.367 +
  19.368 +  RelocIterator iter(this, low_boundary);
  19.369 +  while(iter.next()) {
  19.370 +
  19.371 +    switch (iter.type()) {
  19.372 +
  19.373 +    case relocInfo::virtual_call_type:
  19.374 +      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  19.375 +      break;
  19.376 +
  19.377 +    case relocInfo::opt_virtual_call_type:
  19.378 +      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  19.379 +      break;
  19.380 +
  19.381 +    case relocInfo::static_call_type:
  19.382 +      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
  19.383 +      break;
  19.384 +    }
  19.385 +  }
  19.386 +}
  19.387 +
  19.388  #ifdef ASSERT
  19.389  
  19.390  class CheckClass : AllStatic {
  19.391 @@ -1942,7 +2215,7 @@
  19.392      assert(cur != NULL, "not NULL-terminated");
  19.393      nmethod* next = cur->_oops_do_mark_link;
  19.394      cur->_oops_do_mark_link = NULL;
  19.395 -    cur->fix_oop_relocations();
  19.396 +    cur->verify_oop_relocations();
  19.397      NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
  19.398      cur = next;
  19.399    }
  19.400 @@ -2484,6 +2757,10 @@
  19.401  };
  19.402  
  19.403  void nmethod::verify_scavenge_root_oops() {
  19.404 +  if (UseG1GC) {
  19.405 +    return;
  19.406 +  }
  19.407 +
  19.408    if (!on_scavenge_root_list()) {
  19.409      // Actually look inside, to verify the claim that it's clean.
  19.410      DebugScavengeRoot debug_scavenge_root(this);
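
The hunk above splits nmethod unloading into do_unloading_parallel, which reports whether any call-site cleaning had to be postponed, and do_unloading_parallel_postponed, which runs afterwards. A minimal standalone sketch of that two-phase shape (Task, first_pass and postponed_pass are invented names, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct Task {
      bool needs_postponed_cleaning;
    };

    // Phase 1: each worker processes its tasks and only reports whether any
    // cleaning had to be deferred.
    static bool first_pass(std::vector<Task>& tasks) {
      bool postponed = false;
      for (Task& t : tasks) {
        postponed |= t.needs_postponed_cleaning;   // collect, don't clean yet
      }
      return postponed;
    }

    // Phase 2: only runs if phase 1 reported postponed work.
    static void postponed_pass(std::vector<Task>& tasks) {
      for (Task& t : tasks) {
        if (t.needs_postponed_cleaning) {
          t.needs_postponed_cleaning = false;      // now safe to clean
        }
      }
    }

    int main() {
      std::vector<Task> tasks = { {false}, {true}, {false} };
      if (first_pass(tasks)) {
        postponed_pass(tasks);
      }
      std::printf("remaining postponed: %d\n", (int)tasks[1].needs_postponed_cleaning); // 0
      return 0;
    }

The sketch only illustrates the ordering; the actual criteria for postponing a call site are those encoded in clean_if_nmethod_is_unloaded above.
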
    20.1 --- a/src/share/vm/code/nmethod.hpp	Tue Jul 01 09:03:55 2014 +0200
    20.2 +++ b/src/share/vm/code/nmethod.hpp	Mon Jul 07 10:12:40 2014 +0200
    20.3 @@ -116,6 +116,11 @@
    20.4    friend class NMethodSweeper;
    20.5    friend class CodeCache;  // scavengable oops
    20.6   private:
    20.7 +
    20.8 +  // GC support to help figure out if an nmethod has been
    20.9 +  // cleaned/unloaded by the current GC.
   20.10 +  static unsigned char _global_unloading_clock;
   20.11 +
   20.12    // Shared fields for all nmethod's
   20.13    Method*   _method;
   20.14    int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
   20.15 @@ -123,7 +128,13 @@
   20.16  
   20.17    // To support simple linked-list chaining of nmethods:
   20.18    nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
   20.19 -  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
   20.20 +
   20.21 +  union {
   20.22 +    // Used by G1 to chain nmethods.
   20.23 +    nmethod* _unloading_next;
   20.24 +    // Used by non-G1 GCs to chain nmethods.
   20.25 +    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
   20.26 +  };
   20.27  
   20.28    static nmethod* volatile _oops_do_mark_nmethods;
   20.29    nmethod*        volatile _oops_do_mark_link;
   20.30 @@ -185,6 +196,8 @@
   20.31    // Protected by Patching_lock
   20.32    volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
   20.33  
   20.34 +  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
   20.35 +
   20.36  #ifdef ASSERT
   20.37    bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
   20.38  #endif
   20.39 @@ -442,6 +455,15 @@
   20.40    bool  unload_reported()                         { return _unload_reported; }
   20.41    void  set_unload_reported()                     { _unload_reported = true; }
   20.42  
   20.43 +  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
   20.44 +  nmethod* unloading_next()                       { return _unloading_next; }
   20.45 +
   20.46 +  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
   20.47 +  static void increase_unloading_clock();
   20.48 +
   20.49 +  void set_unloading_clock(unsigned char unloading_clock);
   20.50 +  unsigned char unloading_clock();
   20.51 +
   20.52    bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
   20.53    void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
   20.54  
   20.55 @@ -557,6 +579,10 @@
   20.56      return (addr >= code_begin() && addr < verified_entry_point());
   20.57    }
   20.58  
   20.59 +  // Verify calls to dead methods have been cleaned.
   20.60 +  void verify_clean_inline_caches();
   20.61 +  // Verify and count cached icholder relocations.
   20.62 +  int  verify_icholder_relocations();
   20.63    // Check that all metadata is still alive
   20.64    void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
   20.65  
   20.66 @@ -582,6 +608,10 @@
   20.67  
   20.68    // GC support
   20.69    void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   20.70 +  //  The parallel versions are used by G1.
   20.71 +  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
   20.72 +  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
   20.73 +  //  Unload an nmethod if the *root object is dead.
   20.74    bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
   20.75  
   20.76    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
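
The header above only declares the new _global_unloading_clock / _unloading_clock pair. As a rough illustration of the idea (a minimal standalone sketch, not the HotSpot implementation; FakeNmethod, clean_if_needed and the values printed in main are invented for the example), a per-nmethod clock compared against a global counter lets parallel workers skip entries that have already been cleaned in the current unloading cycle:

    #include <cstdio>

    struct FakeNmethod {
      unsigned char unloading_clock;   // cycle in which this entry was last cleaned
    };

    static unsigned char global_unloading_clock = 1;

    // Called once at the start of each unloading cycle.
    static void increase_unloading_clock() {
      global_unloading_clock++;
    }

    // A worker cleans the entry only if it has not been touched this cycle.
    static bool clean_if_needed(FakeNmethod* nm) {
      if (nm->unloading_clock == global_unloading_clock) {
        return false;                  // already cleaned in this cycle
      }
      // ... the actual cleaning work would go here ...
      nm->unloading_clock = global_unloading_clock;
      return true;
    }

    int main() {
      FakeNmethod nm = { 0 };
      increase_unloading_clock();                                        // new cycle
      std::printf("first attempt cleaned:  %d\n", clean_if_needed(&nm)); // prints 1
      std::printf("second attempt cleaned: %d\n", clean_if_needed(&nm)); // prints 0
      return 0;
    }
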
    21.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jul 01 09:03:55 2014 +0200
    21.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Jul 07 10:12:40 2014 +0200
    21.3 @@ -1570,11 +1570,11 @@
    21.4    }
    21.5  
    21.6    if (MetaspaceGC::should_concurrent_collect()) {
    21.7 -      if (Verbose && PrintGCDetails) {
    21.8 +    if (Verbose && PrintGCDetails) {
    21.9        gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
   21.10 -      }
   21.11 -      return true;
   21.12 -    }
   21.13 +    }
   21.14 +    return true;
   21.15 +  }
   21.16  
   21.17    return false;
   21.18  }
   21.19 @@ -3028,20 +3028,21 @@
   21.20    HandleMark  hm;
   21.21    GenCollectedHeap* gch = GenCollectedHeap::heap();
   21.22  
   21.23 -  // Get a clear set of claim bits for the strong roots processing to work with.
   21.24 +  // Get a clear set of claim bits for the roots processing to work with.
   21.25    ClassLoaderDataGraph::clear_claimed_marks();
   21.26  
   21.27    // Mark from roots one level into CMS
   21.28    MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
   21.29    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   21.30  
   21.31 -  gch->gen_process_strong_roots(_cmsGen->level(),
   21.32 -                                true,   // younger gens are roots
   21.33 -                                true,   // activate StrongRootsScope
   21.34 -                                SharedHeap::ScanningOption(roots_scanning_options()),
   21.35 -                                &notOlder,
   21.36 -                                NULL,
   21.37 -                                NULL); // SSS: Provide correct closure
   21.38 +  gch->gen_process_roots(_cmsGen->level(),
   21.39 +                         true,   // younger gens are roots
   21.40 +                         true,   // activate StrongRootsScope
   21.41 +                         SharedHeap::ScanningOption(roots_scanning_options()),
   21.42 +                         should_unload_classes(),
   21.43 +                         &notOlder,
   21.44 +                         NULL,
   21.45 +                         NULL);  // SSS: Provide correct closure
   21.46  
   21.47    // Now mark from the roots
   21.48    MarkFromRootsClosure markFromRootsClosure(this, _span,
   21.49 @@ -3092,22 +3093,24 @@
   21.50    HandleMark  hm;
   21.51    GenCollectedHeap* gch = GenCollectedHeap::heap();
   21.52  
   21.53 -  // Get a clear set of claim bits for the strong roots processing to work with.
   21.54 +  // Get a clear set of claim bits for the roots processing to work with.
   21.55    ClassLoaderDataGraph::clear_claimed_marks();
   21.56  
   21.57    // Mark from roots one level into CMS
   21.58    MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
   21.59                                       markBitMap());
   21.60 -  KlassToOopClosure klass_closure(&notOlder);
   21.61 +  CLDToOopClosure cld_closure(&notOlder, true);
   21.62  
   21.63    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   21.64 -  gch->gen_process_strong_roots(_cmsGen->level(),
   21.65 -                                true,   // younger gens are roots
   21.66 -                                true,   // activate StrongRootsScope
   21.67 -                                SharedHeap::ScanningOption(roots_scanning_options()),
   21.68 -                                &notOlder,
   21.69 -                                NULL,
   21.70 -                                &klass_closure);
   21.71 +
   21.72 +  gch->gen_process_roots(_cmsGen->level(),
   21.73 +                         true,   // younger gens are roots
   21.74 +                         true,   // activate StrongRootsScope
   21.75 +                         SharedHeap::ScanningOption(roots_scanning_options()),
   21.76 +                         should_unload_classes(),
   21.77 +                         &notOlder,
   21.78 +                         NULL,
   21.79 +                         &cld_closure);
   21.80  
   21.81    // Now mark from the roots
   21.82    MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
   21.83 @@ -3294,12 +3297,10 @@
   21.84  void CMSCollector::setup_cms_unloading_and_verification_state() {
   21.85    const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
   21.86                               || VerifyBeforeExit;
   21.87 -  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
   21.88 +  const  int  rso           =   SharedHeap::SO_AllCodeCache;
   21.89  
   21.90    // We set the proper root for this CMS cycle here.
   21.91    if (should_unload_classes()) {   // Should unload classes this cycle
   21.92 -    remove_root_scanning_option(SharedHeap::SO_AllClasses);
   21.93 -    add_root_scanning_option(SharedHeap::SO_SystemClasses);
   21.94      remove_root_scanning_option(rso);  // Shrink the root set appropriately
   21.95      set_verifying(should_verify);    // Set verification state for this cycle
   21.96      return;                            // Nothing else needs to be done at this time
   21.97 @@ -3307,8 +3308,6 @@
   21.98  
   21.99    // Not unloading classes this cycle
  21.100    assert(!should_unload_classes(), "Inconsitency!");
  21.101 -  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
  21.102 -  add_root_scanning_option(SharedHeap::SO_AllClasses);
  21.103  
  21.104    if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
  21.105      // Include symbols, strings and code cache elements to prevent their resurrection.
  21.106 @@ -3719,15 +3718,16 @@
  21.107        gch->set_par_threads(0);
  21.108      } else {
  21.109        // The serial version.
  21.110 -      KlassToOopClosure klass_closure(&notOlder);
  21.111 +      CLDToOopClosure cld_closure(&notOlder, true);
  21.112        gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  21.113 -      gch->gen_process_strong_roots(_cmsGen->level(),
  21.114 -                                    true,   // younger gens are roots
  21.115 -                                    true,   // activate StrongRootsScope
  21.116 -                                    SharedHeap::ScanningOption(roots_scanning_options()),
  21.117 -                                    &notOlder,
  21.118 -                                    NULL,
  21.119 -                                    &klass_closure);
  21.120 +      gch->gen_process_roots(_cmsGen->level(),
  21.121 +                             true,   // younger gens are roots
  21.122 +                             true,   // activate StrongRootsScope
  21.123 +                             SharedHeap::ScanningOption(roots_scanning_options()),
  21.124 +                             should_unload_classes(),
  21.125 +                             &notOlder,
  21.126 +                             NULL,
  21.127 +                             &cld_closure);
  21.128      }
  21.129    }
  21.130  
  21.131 @@ -5203,7 +5203,6 @@
  21.132    _timer.start();
  21.133    GenCollectedHeap* gch = GenCollectedHeap::heap();
  21.134    Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  21.135 -  KlassToOopClosure klass_closure(&par_mri_cl);
  21.136  
  21.137    // ---------- young gen roots --------------
  21.138    {
  21.139 @@ -5219,13 +5218,17 @@
  21.140    // ---------- remaining roots --------------
  21.141    _timer.reset();
  21.142    _timer.start();
  21.143 -  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
  21.144 -                                false,     // yg was scanned above
  21.145 -                                false,     // this is parallel code
  21.146 -                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  21.147 -                                &par_mri_cl,
  21.148 -                                NULL,
  21.149 -                                &klass_closure);
  21.150 +
  21.151 +  CLDToOopClosure cld_closure(&par_mri_cl, true);
  21.152 +
  21.153 +  gch->gen_process_roots(_collector->_cmsGen->level(),
  21.154 +                         false,     // yg was scanned above
  21.155 +                         false,     // this is parallel code
  21.156 +                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  21.157 +                         _collector->should_unload_classes(),
  21.158 +                         &par_mri_cl,
  21.159 +                         NULL,
  21.160 +                         &cld_closure);
  21.161    assert(_collector->should_unload_classes()
  21.162           || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
  21.163           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  21.164 @@ -5354,13 +5357,15 @@
  21.165    // ---------- remaining roots --------------
  21.166    _timer.reset();
  21.167    _timer.start();
  21.168 -  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
  21.169 -                                false,     // yg was scanned above
  21.170 -                                false,     // this is parallel code
  21.171 -                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  21.172 -                                &par_mrias_cl,
  21.173 -                                NULL,
  21.174 -                                NULL);     // The dirty klasses will be handled below
  21.175 +  gch->gen_process_roots(_collector->_cmsGen->level(),
  21.176 +                         false,     // yg was scanned above
  21.177 +                         false,     // this is parallel code
  21.178 +                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  21.179 +                         _collector->should_unload_classes(),
  21.180 +                         &par_mrias_cl,
  21.181 +                         NULL,
  21.182 +                         NULL);     // The dirty klasses will be handled below
  21.183 +
  21.184    assert(_collector->should_unload_classes()
  21.185           || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
  21.186           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  21.187 @@ -5415,7 +5420,7 @@
  21.188    // We might have added oops to ClassLoaderData::_handles during the
  21.189    // concurrent marking phase. These oops point to newly allocated objects
  21.190    // that are guaranteed to be kept alive. Either by the direct allocation
  21.191 -  // code, or when the young collector processes the strong roots. Hence,
  21.192 +  // code, or when the young collector processes the roots. Hence,
  21.193    // we don't have to revisit the _handles block during the remark phase.
  21.194  
  21.195    // ---------- rescan dirty cards ------------
  21.196 @@ -5837,7 +5842,7 @@
  21.197      cms_space,
  21.198      n_workers, workers, task_queues());
  21.199  
  21.200 -  // Set up for parallel process_strong_roots work.
  21.201 +  // Set up for parallel process_roots work.
  21.202    gch->set_par_threads(n_workers);
  21.203    // We won't be iterating over the cards in the card table updating
  21.204    // the younger_gen cards, so we shouldn't call the following else
  21.205 @@ -5846,7 +5851,7 @@
  21.206    // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
  21.207  
  21.208    // The young gen rescan work will not be done as part of
  21.209 -  // process_strong_roots (which currently doesn't knw how to
  21.210 +  // process_roots (which currently doesn't know how to
  21.211    // parallelize such a scan), but rather will be broken up into
  21.212    // a set of parallel tasks (via the sampling that the [abortable]
  21.213    // preclean phase did of EdenSpace, plus the [two] tasks of
  21.214 @@ -5943,13 +5948,15 @@
  21.215  
  21.216      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  21.217      GenCollectedHeap::StrongRootsScope srs(gch);
  21.218 -    gch->gen_process_strong_roots(_cmsGen->level(),
  21.219 -                                  true,  // younger gens as roots
  21.220 -                                  false, // use the local StrongRootsScope
  21.221 -                                  SharedHeap::ScanningOption(roots_scanning_options()),
  21.222 -                                  &mrias_cl,
  21.223 -                                  NULL,
  21.224 -                                  NULL);  // The dirty klasses will be handled below
  21.225 +
  21.226 +    gch->gen_process_roots(_cmsGen->level(),
  21.227 +                           true,  // younger gens as roots
  21.228 +                           false, // use the local StrongRootsScope
  21.229 +                           SharedHeap::ScanningOption(roots_scanning_options()),
  21.230 +                           should_unload_classes(),
  21.231 +                           &mrias_cl,
  21.232 +                           NULL,
  21.233 +                           NULL); // The dirty klasses will be handled below
  21.234  
  21.235      assert(should_unload_classes()
  21.236             || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
  21.237 @@ -5989,7 +5996,7 @@
  21.238    // We might have added oops to ClassLoaderData::_handles during the
  21.239    // concurrent marking phase. These oops point to newly allocated objects
  21.240    // that are guaranteed to be kept alive. Either by the direct allocation
  21.241 -  // code, or when the young collector processes the strong roots. Hence,
  21.242 +  // code, or when the young collector processes the roots. Hence,
  21.243    // we don't have to revisit the _handles block during the remark phase.
  21.244  
  21.245    verify_work_stacks_empty();
  21.246 @@ -6239,15 +6246,14 @@
  21.247        // Clean up unreferenced symbols in symbol table.
  21.248        SymbolTable::unlink();
  21.249      }
  21.250 -  }
  21.251 -
  21.252 -  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  21.253 -  // Need to check if we really scanned the StringTable.
  21.254 -  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
  21.255 -    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
  21.256 -    // Delete entries for dead interned strings.
  21.257 -    StringTable::unlink(&_is_alive_closure);
  21.258 -  }
  21.259 +
  21.260 +    {
  21.261 +      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
  21.262 +      // Delete entries for dead interned strings.
  21.263 +      StringTable::unlink(&_is_alive_closure);
  21.264 +    }
  21.265 +  }
  21.266 +
  21.267  
  21.268    // Restore any preserved marks as a result of mark stack or
  21.269    // work queue overflow
    22.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jul 01 09:03:55 2014 +0200
    22.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Jul 07 10:12:40 2014 +0200
    22.3 @@ -32,6 +32,7 @@
    22.4  #include "gc_implementation/shared/generationCounters.hpp"
    22.5  #include "memory/freeBlockDictionary.hpp"
    22.6  #include "memory/generation.hpp"
    22.7 +#include "memory/iterator.hpp"
    22.8  #include "runtime/mutexLocker.hpp"
    22.9  #include "runtime/virtualspace.hpp"
   22.10  #include "services/memoryService.hpp"
    23.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jul 01 09:03:55 2014 +0200
    23.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Jul 07 10:12:40 2014 +0200
    23.3 @@ -24,6 +24,7 @@
    23.4  
    23.5  #include "precompiled.hpp"
    23.6  #include "classfile/symbolTable.hpp"
    23.7 +#include "code/codeCache.hpp"
    23.8  #include "gc_implementation/g1/concurrentMark.inline.hpp"
    23.9  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
   23.10  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   23.11 @@ -39,6 +40,7 @@
   23.12  #include "gc_implementation/shared/gcTimer.hpp"
   23.13  #include "gc_implementation/shared/gcTrace.hpp"
   23.14  #include "gc_implementation/shared/gcTraceTime.hpp"
   23.15 +#include "memory/allocation.hpp"
   23.16  #include "memory/genOopClosures.inline.hpp"
   23.17  #include "memory/referencePolicy.hpp"
   23.18  #include "memory/resourceArea.hpp"
   23.19 @@ -57,8 +59,8 @@
   23.20    _bmWordSize = 0;
   23.21  }
   23.22  
   23.23 -HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
   23.24 -                                               HeapWord* limit) const {
   23.25 +HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
   23.26 +                                               const HeapWord* limit) const {
   23.27    // First we must round addr *up* to a possible object boundary.
   23.28    addr = (HeapWord*)align_size_up((intptr_t)addr,
   23.29                                    HeapWordSize << _shifter);
   23.30 @@ -75,8 +77,8 @@
   23.31    return nextAddr;
   23.32  }
   23.33  
   23.34 -HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
   23.35 -                                                 HeapWord* limit) const {
   23.36 +HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
   23.37 +                                                 const HeapWord* limit) const {
   23.38    size_t addrOffset = heapWordToOffset(addr);
   23.39    if (limit == NULL) {
   23.40      limit = _bmStartWord + _bmWordSize;
   23.41 @@ -1222,6 +1224,9 @@
   23.42  };
   23.43  
   23.44  void ConcurrentMark::scanRootRegions() {
   23.45 +  // Start of concurrent marking.
   23.46 +  ClassLoaderDataGraph::clear_claimed_marks();
   23.47 +
   23.48    // scan_in_progress() will have been set to true only if there was
   23.49    // at least one root region to scan. So, if it's false, we
   23.50    // should not attempt to do any further work.
   23.51 @@ -1270,7 +1275,7 @@
   23.52    CMConcurrentMarkingTask markingTask(this, cmThread());
   23.53    if (use_parallel_marking_threads()) {
   23.54      _parallel_workers->set_active_workers((int)active_workers);
   23.55 -    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
   23.56 +    // Don't set _n_par_threads because it affects MT in process_roots()
   23.57      // and the decisions on that MT processing is made elsewhere.
   23.58      assert(_parallel_workers->active_workers() > 0, "Should have been set");
   23.59      _parallel_workers->run_task(&markingTask);
   23.60 @@ -2138,14 +2143,6 @@
   23.61    // Update the soft reference policy with the new heap occupancy.
   23.62    Universe::update_heap_info_at_gc();
   23.63  
   23.64 -  // We need to make this be a "collection" so any collection pause that
   23.65 -  // races with it goes around and waits for completeCleanup to finish.
   23.66 -  g1h->increment_total_collections();
   23.67 -
   23.68 -  // We reclaimed old regions so we should calculate the sizes to make
   23.69 -  // sure we update the old gen/space data.
   23.70 -  g1h->g1mm()->update_sizes();
   23.71 -
   23.72    if (VerifyDuringGC) {
   23.73      HandleMark hm;  // handle scope
   23.74      Universe::heap()->prepare_for_verify();
   23.75 @@ -2154,6 +2151,19 @@
   23.76    }
   23.77  
   23.78    g1h->verify_region_sets_optional();
   23.79 +
   23.80 +  // We need to make this be a "collection" so any collection pause that
   23.81 +  // races with it goes around and waits for completeCleanup to finish.
   23.82 +  g1h->increment_total_collections();
   23.83 +
   23.84 +  // Clean out dead classes and update Metaspace sizes.
   23.85 +  ClassLoaderDataGraph::purge();
   23.86 +  MetaspaceGC::compute_new_size();
   23.87 +
   23.88 +  // We reclaimed old regions so we should calculate the sizes to make
   23.89 +  // sure we update the old gen/space data.
   23.90 +  g1h->g1mm()->update_sizes();
   23.91 +
   23.92    g1h->trace_heap_after_concurrent_cycle();
   23.93  }
   23.94  
   23.95 @@ -2440,6 +2450,26 @@
   23.96    _g1h->set_par_threads(0);
   23.97  }
   23.98  
   23.99 +void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  23.100 +  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
  23.101 +}
  23.102 +
  23.103 +// Helper class to get rid of some boilerplate code.
  23.104 +class G1RemarkGCTraceTime : public GCTraceTime {
  23.105 +  static bool doit_and_prepend(bool doit) {
  23.106 +    if (doit) {
  23.107 +      gclog_or_tty->put(' ');
  23.108 +    }
  23.109 +    return doit;
  23.110 +  }
  23.111 +
  23.112 + public:
  23.113 +  G1RemarkGCTraceTime(const char* title, bool doit)
  23.114 +    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
  23.115 +        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  23.116 +  }
  23.117 +};
  23.118 +
  23.119  void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  23.120    if (has_overflown()) {
  23.121      // Skip processing the discovered references if we have
  23.122 @@ -2552,9 +2582,28 @@
  23.123      return;
  23.124    }
  23.125  
  23.126 -  g1h->unlink_string_and_symbol_table(&g1_is_alive,
  23.127 -                                      /* process_strings */ false, // currently strings are always roots
  23.128 -                                      /* process_symbols */ true);
  23.129 +  assert(_markStack.isEmpty(), "Marking should have completed");
  23.130 +
  23.131 +  // Unload Klasses, Strings, Symbols, Code Cache, etc.
  23.132 +
  23.133 +  G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
  23.134 +
  23.135 +  bool purged_classes;
  23.136 +
  23.137 +  {
  23.138 +    G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
  23.139 +    purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
  23.140 +  }
  23.141 +
  23.142 +  {
  23.143 +    G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
  23.144 +    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
  23.145 +  }
  23.146 +
  23.147 +  if (G1StringDedup::is_enabled()) {
  23.148 +    G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
  23.149 +    G1StringDedup::unlink(&g1_is_alive);
  23.150 +  }
  23.151  }
  23.152  
  23.153  void ConcurrentMark::swapMarkBitMaps() {
  23.154 @@ -2563,6 +2612,57 @@
  23.155    _nextMarkBitMap  = (CMBitMap*)  temp;
  23.156  }
  23.157  
  23.158 +class CMObjectClosure;
  23.159 +
  23.160 +// Closure for iterating over objects, currently only used for
  23.161 +// processing SATB buffers.
  23.162 +class CMObjectClosure : public ObjectClosure {
  23.163 +private:
  23.164 +  CMTask* _task;
  23.165 +
  23.166 +public:
  23.167 +  void do_object(oop obj) {
  23.168 +    _task->deal_with_reference(obj);
  23.169 +  }
  23.170 +
  23.171 +  CMObjectClosure(CMTask* task) : _task(task) { }
  23.172 +};
  23.173 +
  23.174 +class G1RemarkThreadsClosure : public ThreadClosure {
  23.175 +  CMObjectClosure _cm_obj;
  23.176 +  G1CMOopClosure _cm_cl;
  23.177 +  MarkingCodeBlobClosure _code_cl;
  23.178 +  int _thread_parity;
  23.179 +  bool _is_par;
  23.180 +
  23.181 + public:
  23.182 +  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
  23.183 +    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
  23.184 +    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
  23.185 +
  23.186 +  void do_thread(Thread* thread) {
  23.187 +    if (thread->is_Java_thread()) {
  23.188 +      if (thread->claim_oops_do(_is_par, _thread_parity)) {
  23.189 +        JavaThread* jt = (JavaThread*)thread;
  23.190 +
  23.191 +        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
  23.192 +        // however, oops reachable from nmethods have very complex lifecycles:
  23.193 +        // * Alive if on the stack of an executing method
  23.194 +        // * Weakly reachable otherwise
  23.195 +        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
  23.196 +        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
  23.197 +        jt->nmethods_do(&_code_cl);
  23.198 +
  23.199 +        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
  23.200 +      }
  23.201 +    } else if (thread->is_VM_thread()) {
  23.202 +      if (thread->claim_oops_do(_is_par, _thread_parity)) {
  23.203 +        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
  23.204 +      }
  23.205 +    }
  23.206 +  }
  23.207 +};
  23.208 +
  23.209  class CMRemarkTask: public AbstractGangTask {
  23.210  private:
  23.211    ConcurrentMark* _cm;
  23.212 @@ -2574,6 +2674,14 @@
  23.213      if (worker_id < _cm->active_tasks()) {
  23.214        CMTask* task = _cm->task(worker_id);
  23.215        task->record_start_time();
  23.216 +      {
  23.217 +        ResourceMark rm;
  23.218 +        HandleMark hm;
  23.219 +
  23.220 +        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
  23.221 +        Threads::threads_do(&threads_f);
  23.222 +      }
  23.223 +
  23.224        do {
  23.225          task->do_marking_step(1000000000.0 /* something very large */,
  23.226                                true         /* do_termination       */,
  23.227 @@ -2596,6 +2704,8 @@
  23.228    HandleMark   hm;
  23.229    G1CollectedHeap* g1h = G1CollectedHeap::heap();
  23.230  
  23.231 +  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
  23.232 +
  23.233    g1h->ensure_parsability(false);
  23.234  
  23.235    if (G1CollectedHeap::use_parallel_gc_threads()) {
  23.236 @@ -3421,20 +3531,6 @@
  23.237    }
  23.238  };
  23.239  
  23.240 -// Closure for iterating over objects, currently only used for
  23.241 -// processing SATB buffers.
  23.242 -class CMObjectClosure : public ObjectClosure {
  23.243 -private:
  23.244 -  CMTask* _task;
  23.245 -
  23.246 -public:
  23.247 -  void do_object(oop obj) {
  23.248 -    _task->deal_with_reference(obj);
  23.249 -  }
  23.250 -
  23.251 -  CMObjectClosure(CMTask* task) : _task(task) { }
  23.252 -};
  23.253 -
  23.254  G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
  23.255                                 ConcurrentMark* cm,
  23.256                                 CMTask* task)
  23.257 @@ -3900,15 +3996,6 @@
  23.258      }
  23.259    }
  23.260  
  23.261 -  if (!concurrent() && !has_aborted()) {
  23.262 -    // We should only do this during remark.
  23.263 -    if (G1CollectedHeap::use_parallel_gc_threads()) {
  23.264 -      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
  23.265 -    } else {
  23.266 -      satb_mq_set.iterate_closure_all_threads();
  23.267 -    }
  23.268 -  }
  23.269 -
  23.270    _draining_satb_buffers = false;
  23.271  
  23.272    assert(has_aborted() ||
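
G1RemarkThreadsClosure above walks every Java thread from every remark worker and relies on claim_oops_do()'s parity check so each thread is processed exactly once. A minimal standalone sketch of that claiming pattern (WorkerThreadState, claim_once and the std::atomic machinery are stand-ins, not the HotSpot APIs):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct WorkerThreadState {
      std::atomic<int> claimed_parity{0};   // parity of the phase that last claimed it
    };

    // Returns true for exactly one caller per (thread, parity) pair.
    static bool claim_once(WorkerThreadState& t, int parity) {
      int expected = t.claimed_parity.load();
      while (expected != parity) {
        if (t.claimed_parity.compare_exchange_weak(expected, parity)) {
          return true;                       // this worker claimed the thread
        }
      }
      return false;                          // someone else already claimed it
    }

    int main() {
      const int parity = 1;
      std::vector<WorkerThreadState> java_threads(4);
      std::atomic<int> processed{0};

      auto worker = [&] {
        for (auto& t : java_threads) {
          if (claim_once(t, parity)) {
            processed++;                     // stand-in for nmethods_do + SATB drain
          }
        }
      };

      std::thread w1(worker), w2(worker);
      w1.join(); w2.join();
      std::printf("threads processed: %d (expected 4)\n", processed.load());
      return 0;
    }

Each worker iterates over all threads, but only the worker that wins the compare-and-swap does the per-thread work (the stand-in here for nmethods_do plus draining the SATB queue).
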
    24.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Jul 01 09:03:55 2014 +0200
    24.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Jul 07 10:12:40 2014 +0200
    24.3 @@ -25,6 +25,7 @@
    24.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
    24.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
    24.6  
    24.7 +#include "classfile/javaClasses.hpp"
    24.8  #include "gc_implementation/g1/heapRegionSet.hpp"
    24.9  #include "gc_implementation/shared/gcId.hpp"
   24.10  #include "utilities/taskqueue.hpp"
   24.11 @@ -86,19 +87,19 @@
   24.12    // Return the address corresponding to the next marked bit at or after
   24.13    // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   24.14    // such bit, returns "limit" if that is non-NULL, or else "endWord()".
   24.15 -  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
   24.16 -                                     HeapWord* limit = NULL) const;
   24.17 +  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
   24.18 +                                     const HeapWord* limit = NULL) const;
   24.19    // Return the address corresponding to the next unmarked bit at or after
   24.20    // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   24.21    // such bit, returns "limit" if that is non-NULL, or else "endWord()".
   24.22 -  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
   24.23 -                                       HeapWord* limit = NULL) const;
   24.24 +  HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
   24.25 +                                       const HeapWord* limit = NULL) const;
   24.26  
   24.27    // conversion utilities
   24.28    HeapWord* offsetToHeapWord(size_t offset) const {
   24.29      return _bmStartWord + (offset << _shifter);
   24.30    }
   24.31 -  size_t heapWordToOffset(HeapWord* addr) const {
   24.32 +  size_t heapWordToOffset(const HeapWord* addr) const {
   24.33      return pointer_delta(addr, _bmStartWord) >> _shifter;
   24.34    }
   24.35    int heapWordDiffToOffsetDiff(size_t diff) const;
   24.36 @@ -476,6 +477,7 @@
   24.37    ForceOverflowSettings _force_overflow_conc;
   24.38    ForceOverflowSettings _force_overflow_stw;
   24.39  
   24.40 +  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
   24.41    void weakRefsWork(bool clear_all_soft_refs);
   24.42  
   24.43    void swapMarkBitMaps();
    25.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Jul 01 09:03:55 2014 +0200
    25.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Mon Jul 07 10:12:40 2014 +0200
    25.3 @@ -426,7 +426,7 @@
    25.4        q = n;
    25.5        oop obj = oop(q);
    25.6        if (obj->klass_or_null() == NULL) return q;
    25.7 -      n += obj->size();
    25.8 +      n += block_size(q);
    25.9      }
   25.10      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
   25.11      // [q, n) is the block that crosses the boundary.
    26.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Tue Jul 01 09:03:55 2014 +0200
    26.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Mon Jul 07 10:12:40 2014 +0200
    26.3 @@ -113,7 +113,7 @@
    26.4      q = n;
    26.5      oop obj = oop(q);
    26.6      if (obj->klass_or_null() == NULL) return q;
    26.7 -    n += obj->size();
    26.8 +    n += block_size(q);
    26.9    }
   26.10    assert(q <= n, "wrong order for q and addr");
   26.11    assert(addr < n, "wrong order for addr and n");
    27.1 --- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Tue Jul 01 09:03:55 2014 +0200
    27.2 +++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Mon Jul 07 10:12:40 2014 +0200
    27.3 @@ -30,23 +30,52 @@
    27.4  
    27.5  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    27.6  
    27.7 -G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
    27.8 +G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
    27.9    _top = bottom();
   27.10  }
   27.11  
   27.12  void G1CodeRootChunk::reset() {
   27.13    _next = _prev = NULL;
   27.14 +  _free = NULL;
   27.15    _top = bottom();
   27.16  }
   27.17  
   27.18  void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
   27.19 -  nmethod** cur = bottom();
   27.20 +  NmethodOrLink* cur = bottom();
   27.21    while (cur != _top) {
   27.22 -    cl->do_code_blob(*cur);
   27.23 +    if (is_nmethod(cur)) {
   27.24 +      cl->do_code_blob(cur->_nmethod);
   27.25 +    }
   27.26      cur++;
   27.27    }
   27.28  }
   27.29  
   27.30 +bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
   27.31 +  NmethodOrLink* cur = bottom();
   27.32 +
   27.33 +  for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
   27.34 +    if (cur->_nmethod == method) {
   27.35 +      bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
   27.36 +
   27.37 +      if (!result) {
   27.38 +        // Someone else cleared out this entry.
   27.39 +        return false;
   27.40 +      }
   27.41 +
   27.42 +      // The method was cleared. Time to link it into the free list.
   27.43 +      NmethodOrLink* prev_free;
   27.44 +      do {
   27.45 +        prev_free = (NmethodOrLink*)_free;
   27.46 +        cur->_link = prev_free;
   27.47 +      } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
   27.48 +
   27.49 +      return true;
   27.50 +    }
   27.51 +  }
   27.52 +
   27.53 +  return false;
   27.54 +}
   27.55 +
   27.56  G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
   27.57    _free_list.initialize();
   27.58    _free_list.set_size(G1CodeRootChunk::word_size());
   27.59 @@ -140,34 +169,43 @@
   27.60  
   27.61  void G1CodeRootSet::add(nmethod* method) {
   27.62    if (!contains(method)) {
   27.63 -    // Try to add the nmethod. If there is not enough space, get a new chunk.
   27.64 -    if (_list.head() == NULL || _list.head()->is_full()) {
   27.65 -      G1CodeRootChunk* cur = new_chunk();
   27.66 +    // Find the first chunk that isn't full.
   27.67 +    G1CodeRootChunk* cur = _list.head();
   27.68 +    while (cur != NULL) {
   27.69 +      if (!cur->is_full()) {
   27.70 +        break;
   27.71 +      }
   27.72 +      cur = cur->next();
   27.73 +    }
   27.74 +
   27.75 +    // All chunks are full or there are none; get a new chunk.
   27.76 +    if (cur == NULL) {
   27.77 +      cur = new_chunk();
   27.78        _list.return_chunk_at_head(cur);
   27.79      }
   27.80 -    bool result = _list.head()->add(method);
   27.81 +
   27.82 +    // Add the nmethod.
   27.83 +    bool result = cur->add(method);
   27.84 +
   27.85      guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
   27.86 +
   27.87      _length++;
   27.88    }
   27.89  }
   27.90  
   27.91 -void G1CodeRootSet::remove(nmethod* method) {
   27.92 +void G1CodeRootSet::remove_lock_free(nmethod* method) {
   27.93    G1CodeRootChunk* found = find(method);
   27.94    if (found != NULL) {
   27.95 -    bool result = found->remove(method);
   27.96 -    guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
   27.97 -    // eventually free completely emptied chunk
   27.98 -    if (found->is_empty()) {
   27.99 -      _list.remove_chunk(found);
  27.100 -      free(found);
  27.101 +    bool result = found->remove_lock_free(method);
  27.102 +    if (result) {
  27.103 +      Atomic::dec_ptr((volatile intptr_t*)&_length);
  27.104      }
  27.105 -    _length--;
  27.106    }
  27.107    assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
  27.108  }
  27.109  
  27.110  nmethod* G1CodeRootSet::pop() {
  27.111 -  do {
  27.112 +  while (true) {
  27.113      G1CodeRootChunk* cur = _list.head();
  27.114      if (cur == NULL) {
  27.115        assert(_length == 0, "when there are no chunks, there should be no elements");
  27.116 @@ -180,7 +218,7 @@
  27.117      } else {
  27.118        free(_list.get_chunk_at_head());
  27.119      }
  27.120 -  } while (true);
  27.121 +  }
  27.122  }
  27.123  
  27.124  G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
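
remove_lock_free above clears an entry with a single compare-and-swap and lets only the winning thread push the emptied slot onto the chunk's free list. A minimal standalone sketch of that pattern (Slot, FreeHead and try_remove are invented names; std::atomic stands in for Atomic::cmpxchg_ptr):

    #include <atomic>
    #include <cstdio>

    struct Slot {
      std::atomic<void*> nmethod{nullptr};
      Slot* link = nullptr;
    };

    struct FreeHead {
      std::atomic<Slot*> head{nullptr};
    };

    static bool try_remove(Slot* slot, void* method, FreeHead& free_list) {
      void* expected = method;
      // Clear the entry; only one thread can win this CAS.
      if (!slot->nmethod.compare_exchange_strong(expected, nullptr)) {
        return false;                      // someone else cleared it first
      }
      // The winner pushes the emptied slot onto the lock-free free list.
      Slot* old_head = free_list.head.load();
      do {
        slot->link = old_head;
      } while (!free_list.head.compare_exchange_weak(old_head, slot));
      return true;
    }

    int main() {
      int dummy;                            // stands in for an nmethod
      Slot s;
      s.nmethod.store(&dummy);
      FreeHead free_list;
      std::printf("first remove:  %d\n", try_remove(&s, &dummy, free_list));  // prints 1
      std::printf("second remove: %d\n", try_remove(&s, &dummy, free_list));  // prints 0
      return 0;
    }

Losing the first CAS means another thread already cleared the entry, so the caller reports false and, as in the code above, does not decrement the length.
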
    28.1 --- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Tue Jul 01 09:03:55 2014 +0200
    28.2 +++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Mon Jul 07 10:12:40 2014 +0200
    28.3 @@ -31,6 +31,14 @@
    28.4  
    28.5  class CodeBlobClosure;
    28.6  
    28.7 +// The elements of a G1CodeRootChunk are either:
    28.8 +//  1) nmethod pointers
    28.9 +//  2) nodes in an internally chained free list
   28.10 +typedef union {
   28.11 +  nmethod* _nmethod;
   28.12 +  void*    _link;
   28.13 +} NmethodOrLink;
   28.14 +
   28.15  class G1CodeRootChunk : public CHeapObj<mtGC> {
   28.16   private:
   28.17    static const int NUM_ENTRIES = 32;
   28.18 @@ -38,16 +46,28 @@
   28.19    G1CodeRootChunk*     _next;
   28.20    G1CodeRootChunk*     _prev;
   28.21  
   28.22 -  nmethod** _top;
   28.23 +  NmethodOrLink*          _top;
   28.24 +  // First free position within the chunk.
   28.25 +  volatile NmethodOrLink* _free;
   28.26  
   28.27 -  nmethod* _data[NUM_ENTRIES];
   28.28 +  NmethodOrLink _data[NUM_ENTRIES];
   28.29  
   28.30 -  nmethod** bottom() const {
   28.31 -    return (nmethod**) &(_data[0]);
   28.32 +  NmethodOrLink* bottom() const {
   28.33 +    return (NmethodOrLink*) &(_data[0]);
   28.34    }
   28.35  
   28.36 -  nmethod** end() const {
   28.37 -    return (nmethod**) &(_data[NUM_ENTRIES]);
   28.38 +  NmethodOrLink* end() const {
   28.39 +    return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
   28.40 +  }
   28.41 +
   28.42 +  bool is_link(NmethodOrLink* nmethod_or_link) {
   28.43 +    return nmethod_or_link->_link == NULL ||
   28.44 +        (bottom() <= nmethod_or_link->_link
   28.45 +        && nmethod_or_link->_link < end());
   28.46 +  }
   28.47 +
   28.48 +  bool is_nmethod(NmethodOrLink* nmethod_or_link) {
   28.49 +    return !is_link(nmethod_or_link);
   28.50    }
   28.51  
   28.52   public:
   28.53 @@ -85,46 +105,55 @@
   28.54    }
   28.55  
   28.56    bool is_full() const {
   28.57 -    return _top == (nmethod**)end();
   28.58 +    return _top == end() && _free == NULL;
   28.59    }
   28.60  
   28.61    bool contains(nmethod* method) {
   28.62 -    nmethod** cur = bottom();
   28.63 +    NmethodOrLink* cur = bottom();
   28.64      while (cur != _top) {
   28.65 -      if (*cur == method) return true;
   28.66 +      if (cur->_nmethod == method) return true;
   28.67        cur++;
   28.68      }
   28.69      return false;
   28.70    }
   28.71  
   28.72    bool add(nmethod* method) {
   28.73 -    if (is_full()) return false;
   28.74 -    *_top = method;
   28.75 -    _top++;
   28.76 +    if (is_full()) {
   28.77 +      return false;
   28.78 +    }
   28.79 +
   28.80 +    if (_free != NULL) {
   28.81 +      // Take from internally chained free list
   28.82 +      NmethodOrLink* first_free = (NmethodOrLink*)_free;
   28.83 +      _free = (NmethodOrLink*)_free->_link;
   28.84 +      first_free->_nmethod = method;
   28.85 +    } else {
   28.86 +      // Take from top.
   28.87 +      _top->_nmethod = method;
   28.88 +      _top++;
   28.89 +    }
   28.90 +
   28.91      return true;
   28.92    }
   28.93  
   28.94 -  bool remove(nmethod* method) {
   28.95 -    nmethod** cur = bottom();
   28.96 -    while (cur != _top) {
   28.97 -      if (*cur == method) {
   28.98 -        memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
   28.99 -        _top--;
  28.100 -        return true;
  28.101 -      }
  28.102 -      cur++;
  28.103 -    }
  28.104 -    return false;
  28.105 -  }
  28.106 +  bool remove_lock_free(nmethod* method);
  28.107  
  28.108    void nmethods_do(CodeBlobClosure* blk);
  28.109  
  28.110    nmethod* pop() {
  28.111 -    if (is_empty()) {
  28.112 -      return NULL;
  28.113 +    if (_free != NULL) {
  28.114 +      // Kill the free list.
  28.115 +      _free = NULL;
  28.116      }
  28.117 -    _top--;
  28.118 -    return *_top;
  28.119 +
  28.120 +    while (!is_empty()) {
  28.121 +      _top--;
  28.122 +      if (is_nmethod(_top)) {
  28.123 +        return _top->_nmethod;
  28.124 +      }
  28.125 +    }
  28.126 +
  28.127 +    return NULL;
  28.128    }
  28.129  };
  28.130  
  28.131 @@ -193,7 +222,7 @@
  28.132    // method is likely to be repeatedly called with the same nmethod.
  28.133    void add(nmethod* method);
  28.134  
  28.135 -  void remove(nmethod* method);
  28.136 +  void remove_lock_free(nmethod* method);
  28.137    nmethod* pop();
  28.138  
  28.139    bool contains(nmethod* method);
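
The NmethodOrLink union above stores either an nmethod pointer or a link in the chunk-internal free list, and is_link() discriminates the two by checking whether the stored pointer is NULL or points back into the chunk's own _data array. A minimal standalone sketch of that tagging trick (Entry, Chunk and the sizes are invented for illustration, not the HotSpot types):

    #include <cstdio>

    union Entry {
      void* nmethod;   // payload pointer (always outside the chunk)
      Entry* link;     // next free slot inside the same chunk
    };

    struct Chunk {
      static const int N = 4;
      Entry data[N];
      Entry* free_list;

      Chunk() : free_list(nullptr) {
        for (int i = 0; i < N; i++) data[i].nmethod = nullptr;
      }

      // A slot is a link when it is NULL or points into this chunk's array
      // (mirrors the pointer-based discrimination used by the real chunk).
      bool is_link(const Entry* e) const {
        return e->link == nullptr || (e->link >= data && e->link < data + N);
      }
      bool is_nmethod(const Entry* e) const { return !is_link(e); }

      void remove(int i) {             // push the emptied slot on the free list
        data[i].link = free_list;
        free_list = &data[i];
      }
    };

    int main() {
      Chunk c;
      int dummy;                        // stands in for an nmethod object
      c.data[0].nmethod = &dummy;
      c.data[1].nmethod = &dummy;
      c.remove(1);                      // slot 1 becomes a free-list link
      std::printf("slot 0 is nmethod: %d\n", (int)c.is_nmethod(&c.data[0]));  // 1
      std::printf("slot 1 is nmethod: %d\n", (int)c.is_nmethod(&c.data[1]));  // 0
      return 0;
    }

This is also why pop() above has to skip link entries: once a slot has been freed, its bits look like a pointer into the chunk rather than an nmethod.
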
    29.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jul 01 09:03:55 2014 +0200
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jul 07 10:12:40 2014 +0200
    29.3 @@ -55,6 +55,7 @@
    29.4  #include "gc_implementation/shared/gcTrace.hpp"
    29.5  #include "gc_implementation/shared/gcTraceTime.hpp"
    29.6  #include "gc_implementation/shared/isGCActiveMark.hpp"
    29.7 +#include "memory/allocation.hpp"
    29.8  #include "memory/gcLocker.inline.hpp"
    29.9  #include "memory/generationSpec.hpp"
   29.10  #include "memory/iterator.hpp"
   29.11 @@ -87,10 +88,10 @@
   29.12  // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
   29.13  // The number of GC workers is passed to heap_region_par_iterate_chunked().
   29.14  // It does use run_task() which sets _n_workers in the task.
   29.15 -// G1ParTask executes g1_process_strong_roots() ->
   29.16 -// SharedHeap::process_strong_roots() which calls eventually to
   29.17 +// G1ParTask executes g1_process_roots() ->
   29.18 +// SharedHeap::process_roots() which calls eventually to
   29.19  // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
   29.20 -// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
   29.21 +// SequentialSubTasksDone.  SharedHeap::process_roots() also
   29.22  // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
   29.23  //
   29.24  
   29.25 @@ -3391,25 +3392,19 @@
   29.26      if (!silent) { gclog_or_tty->print("Roots "); }
   29.27      VerifyRootsClosure rootsCl(vo);
   29.28      VerifyKlassClosure klassCl(this, &rootsCl);
   29.29 +    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
   29.30  
   29.31      // We apply the relevant closures to all the oops in the
   29.32 -    // system dictionary, class loader data graph and the string table.
   29.33 -    // Don't verify the code cache here, since it's verified below.
   29.34 -    const int so = SO_AllClasses | SO_Strings;
   29.35 -
   29.36 -    // Need cleared claim bits for the strong roots processing
   29.37 -    ClassLoaderDataGraph::clear_claimed_marks();
   29.38 -
   29.39 -    process_strong_roots(true,      // activate StrongRootsScope
   29.40 -                         ScanningOption(so),  // roots scanning options
   29.41 -                         &rootsCl,
   29.42 -                         &klassCl
   29.43 -                         );
   29.44 -
   29.45 -    // Verify the nmethods in the code cache.
   29.46 +    // system dictionary, class loader data graph, the string table
   29.47 +    // and the nmethods in the code cache.
   29.48      G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
   29.49      G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
   29.50 -    CodeCache::blobs_do(&blobsCl);
   29.51 +
   29.52 +    process_all_roots(true,            // activate StrongRootsScope
   29.53 +                      SO_AllCodeCache, // roots scanning options
   29.54 +                      &rootsCl,
   29.55 +                      &cldCl,
   29.56 +                      &blobsCl);
   29.57  
   29.58      bool failures = rootsCl.failures() || codeRootsCl.failures();
   29.59  
   29.60 @@ -4339,11 +4334,7 @@
   29.61    assert(_mutator_alloc_region.get() == NULL, "post-condition");
   29.62  }
   29.63  
   29.64 -void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   29.65 -  assert_at_safepoint(true /* should_be_vm_thread */);
   29.66 -
   29.67 -  _survivor_gc_alloc_region.init();
   29.68 -  _old_gc_alloc_region.init();
   29.69 +void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
   29.70    HeapRegion* retained_region = _retained_old_gc_alloc_region;
   29.71    _retained_old_gc_alloc_region = NULL;
   29.72  
   29.73 @@ -4375,6 +4366,15 @@
   29.74    }
   29.75  }
   29.76  
   29.77 +void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   29.78 +  assert_at_safepoint(true /* should_be_vm_thread */);
   29.79 +
   29.80 +  _survivor_gc_alloc_region.init();
   29.81 +  _old_gc_alloc_region.init();
   29.82 +
   29.83 +  use_retained_old_gc_alloc_region(evacuation_info);
   29.84 +}
   29.85 +
   29.86  void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
   29.87    evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
   29.88                                           _old_gc_alloc_region.count());
   29.89 @@ -4608,7 +4608,7 @@
   29.90    }
   29.91  }
   29.92  
   29.93 -template <G1Barrier barrier, bool do_mark_object>
   29.94 +template <G1Barrier barrier, G1Mark do_mark_object>
   29.95  template <class T>
   29.96  void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   29.97    T heap_oop = oopDesc::load_heap_oop(p);
   29.98 @@ -4630,7 +4630,7 @@
   29.99      }
  29.100      assert(forwardee != NULL, "forwardee should not be NULL");
  29.101      oopDesc::encode_store_heap_oop(p, forwardee);
  29.102 -    if (do_mark_object && forwardee != obj) {
  29.103 +    if (do_mark_object != G1MarkNone && forwardee != obj) {
  29.104        // If the object is self-forwarded we don't need to explicitly
  29.105        // mark it, the evacuation failure protocol will do so.
  29.106        mark_forwarded_object(obj, forwardee);
  29.107 @@ -4641,9 +4641,8 @@
  29.108      }
  29.109    } else {
  29.110      // The object is not in collection set. If we're a root scanning
  29.111 -    // closure during an initial mark pause (i.e. do_mark_object will
  29.112 -    // be true) then attempt to mark the object.
  29.113 -    if (do_mark_object) {
  29.114 +    // closure during an initial mark pause then attempt to mark the object.
  29.115 +    if (do_mark_object == G1MarkFromRoot) {
  29.116        mark_object(obj);
  29.117      }
  29.118    }
  29.119 @@ -4653,8 +4652,8 @@
  29.120    }
  29.121  }
  29.122  
  29.123 -template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
  29.124 -template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  29.125 +template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
  29.126 +template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
  29.127  
  29.128  class G1ParEvacuateFollowersClosure : public VoidClosure {
  29.129  protected:
  29.130 @@ -4767,6 +4766,51 @@
  29.131      _n_workers = active_workers;
  29.132    }
  29.133  
  29.134 +  // Helps out with CLD processing.
  29.135 +  //
  29.136 +  // During InitialMark we need to:
  29.137 +  // 1) Scavenge all CLDs for the young GC.
  29.138 +  // 2) Mark all objects directly reachable from strong CLDs.
  29.139 +  template <G1Mark do_mark_object>
  29.140 +  class G1CLDClosure : public CLDClosure {
  29.141 +    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
  29.142 +    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
  29.143 +    G1KlassScanClosure                                _klass_in_cld_closure;
  29.144 +    bool                                              _claim;
  29.145 +
  29.146 +   public:
  29.147 +    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
  29.148 +                 bool only_young, bool claim)
  29.149 +        : _oop_closure(oop_closure),
  29.150 +          _oop_in_klass_closure(oop_closure->g1(),
  29.151 +                                oop_closure->pss(),
  29.152 +                                oop_closure->rp()),
  29.153 +          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
  29.154 +          _claim(claim) {
  29.155 +
  29.156 +    }
  29.157 +
  29.158 +    void do_cld(ClassLoaderData* cld) {
  29.159 +      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
  29.160 +    }
  29.161 +  };
  29.162 +
  29.163 +  class G1CodeBlobClosure: public CodeBlobClosure {
  29.164 +    OopClosure* _f;
  29.165 +
  29.166 +   public:
  29.167 +    G1CodeBlobClosure(OopClosure* f) : _f(f) {}
  29.168 +    void do_code_blob(CodeBlob* blob) {
  29.169 +      nmethod* that = blob->as_nmethod_or_null();
  29.170 +      if (that != NULL) {
  29.171 +        if (!that->test_set_oops_do_mark()) {
  29.172 +          that->oops_do(_f);
  29.173 +          that->fix_oop_relocations();
  29.174 +        }
  29.175 +      }
  29.176 +    }
  29.177 +  };
  29.178 +
  29.179    void work(uint worker_id) {
  29.180      if (worker_id >= _n_workers) return;  // no work needed this round
  29.181  
  29.182 @@ -4784,40 +4828,62 @@
  29.183  
  29.184        pss.set_evac_failure_closure(&evac_failure_cl);
  29.185  
  29.186 -      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
  29.187 -      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
  29.188 -
  29.189 -      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
  29.190 -      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
  29.191 -
  29.192 -      bool only_young                 = _g1h->g1_policy()->gcs_are_young();
  29.193 -      G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
  29.194 -      G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
  29.195 -
  29.196 -      OopClosure*                    scan_root_cl = &only_scan_root_cl;
  29.197 -      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
  29.198 +      bool only_young = _g1h->g1_policy()->gcs_are_young();
  29.199 +
  29.200 +      // Non-IM young GC.
  29.201 +      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
  29.202 +      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
  29.203 +                                                                               only_young, // Only process dirty klasses.
  29.204 +                                                                               false);     // No need to claim CLDs.
  29.205 +      // IM young GC.
  29.206 +      //    Strong roots closures.
  29.207 +      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
  29.208 +      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
  29.209 +                                                                               false, // Process all klasses.
  29.210 +                                                                               true); // Need to claim CLDs.
  29.211 +      //    Weak roots closures.
  29.212 +      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
  29.213 +      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
  29.214 +                                                                                    false, // Process all klasses.
  29.215 +                                                                                    true); // Need to claim CLDs.
  29.216 +
  29.217 +      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
  29.218 +      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
  29.219 +      // IM Weak code roots are handled later.
  29.220 +
  29.221 +      OopClosure* strong_root_cl;
  29.222 +      OopClosure* weak_root_cl;
  29.223 +      CLDClosure* strong_cld_cl;
  29.224 +      CLDClosure* weak_cld_cl;
  29.225 +      CodeBlobClosure* strong_code_cl;
  29.226  
  29.227        if (_g1h->g1_policy()->during_initial_mark_pause()) {
  29.228          // We also need to mark copied objects.
  29.229 -        scan_root_cl = &scan_mark_root_cl;
  29.230 -        scan_klasses_cl = &scan_mark_klasses_cl_s;
  29.231 +        strong_root_cl = &scan_mark_root_cl;
  29.232 +        weak_root_cl   = &scan_mark_weak_root_cl;
  29.233 +        strong_cld_cl  = &scan_mark_cld_cl;
  29.234 +        weak_cld_cl    = &scan_mark_weak_cld_cl;
  29.235 +        strong_code_cl = &scan_mark_code_cl;
  29.236 +      } else {
  29.237 +        strong_root_cl = &scan_only_root_cl;
  29.238 +        weak_root_cl   = &scan_only_root_cl;
  29.239 +        strong_cld_cl  = &scan_only_cld_cl;
  29.240 +        weak_cld_cl    = &scan_only_cld_cl;
  29.241 +        strong_code_cl = &scan_only_code_cl;
  29.242        }
  29.243  
  29.244 -      G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  29.245 -
  29.246 -      // Don't scan the scavengable methods in the code cache as part
  29.247 -      // of strong root scanning. The code roots that point into a
  29.248 -      // region in the collection set are scanned when we scan the
  29.249 -      // region's RSet.
  29.250 -      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
  29.251 +
  29.252 +      G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
  29.253  
  29.254        pss.start_strong_roots();
  29.255 -      _g1h->g1_process_strong_roots(/* is scavenging */ true,
  29.256 -                                    SharedHeap::ScanningOption(so),
  29.257 -                                    scan_root_cl,
  29.258 -                                    &push_heap_rs_cl,
  29.259 -                                    scan_klasses_cl,
  29.260 -                                    worker_id);
  29.261 +      _g1h->g1_process_roots(strong_root_cl,
  29.262 +                             weak_root_cl,
  29.263 +                             &push_heap_rs_cl,
  29.264 +                             strong_cld_cl,
  29.265 +                             weak_cld_cl,
  29.266 +                             strong_code_cl,
  29.267 +                             worker_id);
  29.268 +
  29.269        pss.end_strong_roots();
  29.270  
  29.271        {
  29.272 @@ -4855,24 +4921,31 @@
  29.273  
  29.274  void
  29.275  G1CollectedHeap::
  29.276 -g1_process_strong_roots(bool is_scavenging,
  29.277 -                        ScanningOption so,
  29.278 -                        OopClosure* scan_non_heap_roots,
  29.279 -                        OopsInHeapRegionClosure* scan_rs,
  29.280 -                        G1KlassScanClosure* scan_klasses,
  29.281 -                        uint worker_i) {
  29.282 -
  29.283 -  // First scan the strong roots
  29.284 +g1_process_roots(OopClosure* scan_non_heap_roots,
  29.285 +                 OopClosure* scan_non_heap_weak_roots,
  29.286 +                 OopsInHeapRegionClosure* scan_rs,
  29.287 +                 CLDClosure* scan_strong_clds,
  29.288 +                 CLDClosure* scan_weak_clds,
  29.289 +                 CodeBlobClosure* scan_strong_code,
  29.290 +                 uint worker_i) {
  29.291 +
  29.292 +  // First scan the shared roots.
  29.293    double ext_roots_start = os::elapsedTime();
  29.294    double closure_app_time_sec = 0.0;
  29.295  
  29.296 +  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
  29.297 +
  29.298    BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  29.299 -
  29.300 -  process_strong_roots(false, // no scoping; this is parallel code
  29.301 -                       so,
  29.302 -                       &buf_scan_non_heap_roots,
  29.303 -                       scan_klasses
  29.304 -                       );
  29.305 +  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
  29.306 +
  29.307 +  process_roots(false, // no scoping; this is parallel code
  29.308 +                SharedHeap::SO_None,
  29.309 +                &buf_scan_non_heap_roots,
  29.310 +                &buf_scan_non_heap_weak_roots,
  29.311 +                scan_strong_clds,
  29.312 +                // Initial Mark handles the weak CLDs separately.
  29.313 +                (during_im ? NULL : scan_weak_clds),
  29.314 +                scan_strong_code);
  29.315  
  29.316    // Now the CM ref_processor roots.
  29.317    if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  29.318 @@ -4883,10 +4956,21 @@
  29.319      ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  29.320    }
  29.321  
  29.322 +  if (during_im) {
   29.323 +    // Barrier to make sure all workers have passed
   29.324 +    // the strong CLD and strong nmethod phases.
  29.325 +    active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
  29.326 +
  29.327 +    // Now take the complement of the strong CLDs.
  29.328 +    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
  29.329 +  }
  29.330 +
  29.331    // Finish up any enqueued closure apps (attributed as object copy time).
  29.332    buf_scan_non_heap_roots.done();
  29.333 -
  29.334 -  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
  29.335 +  buf_scan_non_heap_weak_roots.done();
  29.336 +
  29.337 +  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
  29.338 +      + buf_scan_non_heap_weak_roots.closure_app_seconds();
  29.339  
  29.340    g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  29.341  
  29.342 @@ -4910,22 +4994,10 @@
  29.343    }
  29.344    g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  29.345  
  29.346 -  // If this is an initial mark pause, and we're not scanning
  29.347 -  // the entire code cache, we need to mark the oops in the
  29.348 -  // strong code root lists for the regions that are not in
  29.349 -  // the collection set.
  29.350 -  // Note all threads participate in this set of root tasks.
  29.351 -  double mark_strong_code_roots_ms = 0.0;
  29.352 -  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
  29.353 -    double mark_strong_roots_start = os::elapsedTime();
  29.354 -    mark_strong_code_roots(worker_i);
  29.355 -    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
  29.356 -  }
  29.357 -  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
  29.358 -
  29.359    // Now scan the complement of the collection set.
  29.360 -  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
  29.361 -  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
  29.362 +  MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
  29.363 +
  29.364 +  g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
  29.365  
  29.366    _process_strong_tasks->all_tasks_completed();
  29.367  }
  29.368 @@ -4947,7 +5019,8 @@
  29.369    bool _do_in_parallel;
  29.370  public:
  29.371    G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
  29.372 -    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
  29.373 +    AbstractGangTask("String/Symbol Unlinking"),
  29.374 +    _is_alive(is_alive),
  29.375      _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
  29.376      _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
  29.377      _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
  29.378 @@ -4969,6 +5042,14 @@
  29.379      guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
  29.380                err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
  29.381                        SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
  29.382 +
  29.383 +    if (G1TraceStringSymbolTableScrubbing) {
  29.384 +      gclog_or_tty->print_cr("Cleaned string and symbol table, "
  29.385 +                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
  29.386 +                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
  29.387 +                             strings_processed(), strings_removed(),
  29.388 +                             symbols_processed(), symbols_removed());
  29.389 +    }
  29.390    }
  29.391  
  29.392    void work(uint worker_id) {
  29.393 @@ -5004,12 +5085,279 @@
  29.394    size_t symbols_removed()   const { return (size_t)_symbols_removed; }
  29.395  };
  29.396  
  29.397 -void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
  29.398 -                                                     bool process_strings, bool process_symbols) {
  29.399 +class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
  29.400 +private:
  29.401 +  static Monitor* _lock;
  29.402 +
  29.403 +  BoolObjectClosure* const _is_alive;
  29.404 +  const bool               _unloading_occurred;
  29.405 +  const uint               _num_workers;
  29.406 +
  29.407 +  // Variables used to claim nmethods.
  29.408 +  nmethod* _first_nmethod;
  29.409 +  volatile nmethod* _claimed_nmethod;
  29.410 +
  29.411 +  // The list of nmethods that need to be processed by the second pass.
  29.412 +  volatile nmethod* _postponed_list;
  29.413 +  volatile uint     _num_entered_barrier;
  29.414 +
  29.415 + public:
  29.416 +  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
  29.417 +      _is_alive(is_alive),
  29.418 +      _unloading_occurred(unloading_occurred),
  29.419 +      _num_workers(num_workers),
  29.420 +      _first_nmethod(NULL),
  29.421 +      _claimed_nmethod(NULL),
  29.422 +      _postponed_list(NULL),
  29.423 +      _num_entered_barrier(0)
  29.424 +  {
  29.425 +    nmethod::increase_unloading_clock();
  29.426 +    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
  29.427 +    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
  29.428 +  }
  29.429 +
  29.430 +  ~G1CodeCacheUnloadingTask() {
  29.431 +    CodeCache::verify_clean_inline_caches();
  29.432 +
  29.433 +    CodeCache::set_needs_cache_clean(false);
  29.434 +    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
  29.435 +
  29.436 +    CodeCache::verify_icholder_relocations();
  29.437 +  }
  29.438 +
  29.439 + private:
  29.440 +  void add_to_postponed_list(nmethod* nm) {
  29.441 +      nmethod* old;
  29.442 +      do {
  29.443 +        old = (nmethod*)_postponed_list;
  29.444 +        nm->set_unloading_next(old);
  29.445 +      } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
  29.446 +  }
  29.447 +
  29.448 +  void clean_nmethod(nmethod* nm) {
  29.449 +    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
  29.450 +
  29.451 +    if (postponed) {
  29.452 +      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
  29.453 +      add_to_postponed_list(nm);
  29.454 +    }
  29.455 +
   29.456 +    // Mark that this nmethod has been cleaned/unloaded.
   29.457 +    // After this call, it will be safe to ask if this nmethod was unloaded or not.
  29.458 +    nm->set_unloading_clock(nmethod::global_unloading_clock());
  29.459 +  }
  29.460 +
  29.461 +  void clean_nmethod_postponed(nmethod* nm) {
  29.462 +    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  29.463 +  }
  29.464 +
  29.465 +  static const int MaxClaimNmethods = 16;
  29.466 +
  29.467 +  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
  29.468 +    nmethod* first;
  29.469 +    nmethod* last;
  29.470 +
  29.471 +    do {
  29.472 +      *num_claimed_nmethods = 0;
  29.473 +
  29.474 +      first = last = (nmethod*)_claimed_nmethod;
  29.475 +
  29.476 +      if (first != NULL) {
  29.477 +        for (int i = 0; i < MaxClaimNmethods; i++) {
  29.478 +          last = CodeCache::alive_nmethod(CodeCache::next(last));
  29.479 +
  29.480 +          if (last == NULL) {
  29.481 +            break;
  29.482 +          }
  29.483 +
  29.484 +          claimed_nmethods[i] = last;
  29.485 +          (*num_claimed_nmethods)++;
  29.486 +        }
  29.487 +      }
  29.488 +
  29.489 +    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
  29.490 +  }
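claim_nmethods() above advances the shared cursor by up to MaxClaimNmethods entries with a single compare-and-swap, so workers contend on the cursor once per batch rather than once per nmethod. A simplified standalone analogue of chunked claiming over an array index (plain C++11, illustrative names, not HotSpot code):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    class ChunkedClaimer {
      std::atomic<std::size_t> _cursor{0};
      const std::size_t        _limit;

     public:
      explicit ChunkedClaimer(std::size_t limit) : _limit(limit) {}

      // Claims up to chunk_size items and reports the half-open range
      // [*begin, *end). Returns false once all items have been handed out.
      bool claim(std::size_t chunk_size, std::size_t* begin, std::size_t* end) {
        std::size_t cur = _cursor.load();
        std::size_t next;
        do {
          if (cur >= _limit) {
            return false;
          }
          next = std::min(cur + chunk_size, _limit);
        } while (!_cursor.compare_exchange_weak(cur, next));
        *begin = cur;
        *end   = next;
        return true;
      }
    };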
  29.491 +
  29.492 +  nmethod* claim_postponed_nmethod() {
  29.493 +    nmethod* claim;
  29.494 +    nmethod* next;
  29.495 +
  29.496 +    do {
  29.497 +      claim = (nmethod*)_postponed_list;
  29.498 +      if (claim == NULL) {
  29.499 +        return NULL;
  29.500 +      }
  29.501 +
  29.502 +      next = claim->unloading_next();
  29.503 +
  29.504 +    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
  29.505 +
  29.506 +    return claim;
  29.507 +  }
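add_to_postponed_list() and claim_postponed_nmethod() together form a lock-free intrusive stack (a Treiber stack): pushes and pops both CAS on the list head, and the "next" link lives in the nmethod itself. A self-contained sketch of the same pattern (plain C++11 with std::atomic in place of Atomic::cmpxchg_ptr; names are illustrative):

    #include <atomic>

    struct WorkItem {
      WorkItem* next = nullptr;
      // ... payload ...
    };

    class PostponedList {
      std::atomic<WorkItem*> _head{nullptr};

     public:
      void push(WorkItem* item) {
        WorkItem* old_head = _head.load();
        do {
          item->next = old_head;  // link before publishing the new head
        } while (!_head.compare_exchange_weak(old_head, item));
      }

      // Returns nullptr when the list is empty. ABA is not a concern in the
      // usage sketched here because items are pushed in one phase and only
      // popped in a later phase, never recycled concurrently.
      WorkItem* pop() {
        WorkItem* claimed = _head.load();
        do {
          if (claimed == nullptr) {
            return nullptr;
          }
        } while (!_head.compare_exchange_weak(claimed, claimed->next));
        return claimed;
      }
    };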
  29.508 +
  29.509 + public:
  29.510 +  // Mark that we're done with the first pass of nmethod cleaning.
  29.511 +  void barrier_mark(uint worker_id) {
  29.512 +    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  29.513 +    _num_entered_barrier++;
  29.514 +    if (_num_entered_barrier == _num_workers) {
  29.515 +      ml.notify_all();
  29.516 +    }
  29.517 +  }
  29.518 +
  29.519 +  // See if we have to wait for the other workers to
  29.520 +  // finish their first-pass nmethod cleaning work.
  29.521 +  void barrier_wait(uint worker_id) {
  29.522 +    if (_num_entered_barrier < _num_workers) {
  29.523 +      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  29.524 +      while (_num_entered_barrier < _num_workers) {
  29.525 +          ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
  29.526 +      }
  29.527 +    }
  29.528 +  }
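barrier_mark() and barrier_wait() implement a one-shot counting barrier: each worker bumps the count when its first pass is done, the last one to arrive notifies, and any worker that still sees the count below _num_workers blocks on the monitor. A standalone sketch using std::mutex and std::condition_variable in place of HotSpot's MonitorLockerEx (illustrative only):

    #include <condition_variable>
    #include <mutex>

    class OneShotBarrier {
      std::mutex              _lock;
      std::condition_variable _cv;
      unsigned                _entered;
      const unsigned          _num_workers;

     public:
      explicit OneShotBarrier(unsigned num_workers)
          : _entered(0), _num_workers(num_workers) {}

      // Called once per worker when it finishes its first-pass work.
      void mark() {
        std::lock_guard<std::mutex> guard(_lock);
        if (++_entered == _num_workers) {
          _cv.notify_all();
        }
      }

      // Blocks until every worker has called mark().
      void wait() {
        std::unique_lock<std::mutex> guard(_lock);
        _cv.wait(guard, [this] { return _entered >= _num_workers; });
      }
    };

Splitting mark() and wait() lets a worker do unrelated work in between, which is how G1ParallelCleaningTask::work() below overlaps string/symbol unlinking with the slowest worker's first cleaning pass.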
  29.529 +
  29.530 +  // Cleaning and unloading of nmethods. Some work has to be postponed
  29.531 +  // to the second pass, when we know which nmethods survive.
  29.532 +  void work_first_pass(uint worker_id) {
   29.533 +    // The first nmethod is claimed by the first worker.
  29.534 +    if (worker_id == 0 && _first_nmethod != NULL) {
  29.535 +      clean_nmethod(_first_nmethod);
  29.536 +      _first_nmethod = NULL;
  29.537 +    }
  29.538 +
  29.539 +    int num_claimed_nmethods;
  29.540 +    nmethod* claimed_nmethods[MaxClaimNmethods];
  29.541 +
  29.542 +    while (true) {
  29.543 +      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
  29.544 +
  29.545 +      if (num_claimed_nmethods == 0) {
  29.546 +        break;
  29.547 +      }
  29.548 +
  29.549 +      for (int i = 0; i < num_claimed_nmethods; i++) {
  29.550 +        clean_nmethod(claimed_nmethods[i]);
  29.551 +      }
  29.552 +    }
  29.553 +  }
  29.554 +
  29.555 +  void work_second_pass(uint worker_id) {
  29.556 +    nmethod* nm;
  29.557 +    // Take care of postponed nmethods.
  29.558 +    while ((nm = claim_postponed_nmethod()) != NULL) {
  29.559 +      clean_nmethod_postponed(nm);
  29.560 +    }
  29.561 +  }
  29.562 +};
  29.563 +
  29.564 +Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
  29.565 +
  29.566 +class G1KlassCleaningTask : public StackObj {
  29.567 +  BoolObjectClosure*                      _is_alive;
  29.568 +  volatile jint                           _clean_klass_tree_claimed;
  29.569 +  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
  29.570 +
  29.571 + public:
  29.572 +  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
  29.573 +      _is_alive(is_alive),
  29.574 +      _clean_klass_tree_claimed(0),
  29.575 +      _klass_iterator() {
  29.576 +  }
  29.577 +
  29.578 + private:
  29.579 +  bool claim_clean_klass_tree_task() {
  29.580 +    if (_clean_klass_tree_claimed) {
  29.581 +      return false;
  29.582 +    }
  29.583 +
  29.584 +    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
  29.585 +  }
  29.586 +
  29.587 +  InstanceKlass* claim_next_klass() {
  29.588 +    Klass* klass;
  29.589 +    do {
  29.590 +      klass =_klass_iterator.next_klass();
  29.591 +    } while (klass != NULL && !klass->oop_is_instance());
  29.592 +
  29.593 +    return (InstanceKlass*)klass;
  29.594 +  }
  29.595 +
  29.596 +public:
  29.597 +
  29.598 +  void clean_klass(InstanceKlass* ik) {
  29.599 +    ik->clean_implementors_list(_is_alive);
  29.600 +    ik->clean_method_data(_is_alive);
  29.601 +
  29.602 +    // G1 specific cleanup work that has
  29.603 +    // been moved here to be done in parallel.
  29.604 +    ik->clean_dependent_nmethods();
  29.605 +  }
  29.606 +
  29.607 +  void work() {
  29.608 +    ResourceMark rm;
  29.609 +
  29.610 +    // One worker will clean the subklass/sibling klass tree.
  29.611 +    if (claim_clean_klass_tree_task()) {
  29.612 +      Klass::clean_subklass_tree(_is_alive);
  29.613 +    }
  29.614 +
   29.615 +    // All workers will help clean the classes.
  29.616 +    InstanceKlass* klass;
  29.617 +    while ((klass = claim_next_klass()) != NULL) {
  29.618 +      clean_klass(klass);
  29.619 +    }
  29.620 +  }
  29.621 +};
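G1KlassCleaningTask combines two claiming schemes: a CAS-guarded flag so that exactly one worker cleans the subklass/sibling tree, and a shared iterator that all workers drain one InstanceKlass at a time. A simplified standalone sketch of both (plain C++11 over a vector, illustrative names, not the real ClassLoaderDataGraphKlassIteratorAtomic):

    #include <atomic>
    #include <cstddef>
    #include <vector>

    struct Klass {
      bool is_instance;
      // ... the rest of the klass ...
    };

    class KlassCleaning {
      std::atomic<bool>        _tree_task_claimed{false};
      std::atomic<std::size_t> _next{0};
      std::vector<Klass*>&     _klasses;

     public:
      explicit KlassCleaning(std::vector<Klass*>& klasses) : _klasses(klasses) {}

      // Exactly one worker wins this and cleans the subklass/sibling tree.
      bool claim_tree_task() { return !_tree_task_claimed.exchange(true); }

      // Hands out instance klasses one at a time; nullptr once drained.
      Klass* claim_next() {
        for (;;) {
          std::size_t i = _next.fetch_add(1);
          if (i >= _klasses.size()) {
            return nullptr;
          }
          if (_klasses[i]->is_instance) {
            return _klasses[i];  // skip non-instance klasses, like the real iterator
          }
        }
      }
    };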
  29.622 +
  29.623 +// To minimize the remark pause times, the tasks below are done in parallel.
  29.624 +class G1ParallelCleaningTask : public AbstractGangTask {
  29.625 +private:
  29.626 +  G1StringSymbolTableUnlinkTask _string_symbol_task;
  29.627 +  G1CodeCacheUnloadingTask      _code_cache_task;
  29.628 +  G1KlassCleaningTask           _klass_cleaning_task;
  29.629 +
  29.630 +public:
  29.631 +  // The constructor is run in the VMThread.
  29.632 +  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
  29.633 +      AbstractGangTask("Parallel Cleaning"),
  29.634 +      _string_symbol_task(is_alive, process_strings, process_symbols),
  29.635 +      _code_cache_task(num_workers, is_alive, unloading_occurred),
  29.636 +      _klass_cleaning_task(is_alive) {
  29.637 +  }
  29.638 +
  29.639 +  // The parallel work done by all worker threads.
  29.640 +  void work(uint worker_id) {
  29.641 +    // Do first pass of code cache cleaning.
  29.642 +    _code_cache_task.work_first_pass(worker_id);
  29.643 +
   29.644 +    // Signal that this thread has finished the first pass of code cache cleaning.
  29.645 +    _code_cache_task.barrier_mark(worker_id);
  29.646 +
  29.647 +    // Clean the Strings and Symbols.
  29.648 +    _string_symbol_task.work(worker_id);
  29.649 +
  29.650 +    // Wait for all workers to finish the first code cache cleaning pass.
  29.651 +    _code_cache_task.barrier_wait(worker_id);
  29.652 +
   29.653 +    // Do the second pass of code cache cleaning, which relies on
   29.654 +    // the liveness information gathered during the first pass.
  29.655 +    _code_cache_task.work_second_pass(worker_id);
  29.656 +
  29.657 +    // Clean all klasses that were not unloaded.
  29.658 +    _klass_cleaning_task.work();
  29.659 +  }
  29.660 +};
  29.661 +
  29.662 +
  29.663 +void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
  29.664 +                                        bool process_strings,
  29.665 +                                        bool process_symbols,
  29.666 +                                        bool class_unloading_occurred) {
  29.667    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  29.668 -                   _g1h->workers()->active_workers() : 1);
  29.669 -
  29.670 -  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
  29.671 +                    workers()->active_workers() : 1);
  29.672 +
  29.673 +  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
  29.674 +                                        n_workers, class_unloading_occurred);
  29.675    if (G1CollectedHeap::use_parallel_gc_threads()) {
  29.676      set_par_threads(n_workers);
  29.677      workers()->run_task(&g1_unlink_task);
  29.678 @@ -5017,12 +5365,21 @@
  29.679    } else {
  29.680      g1_unlink_task.work(0);
  29.681    }
  29.682 -  if (G1TraceStringSymbolTableScrubbing) {
  29.683 -    gclog_or_tty->print_cr("Cleaned string and symbol table, "
  29.684 -                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
  29.685 -                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
  29.686 -                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
  29.687 -                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
  29.688 +}
  29.689 +
  29.690 +void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
  29.691 +                                                     bool process_strings, bool process_symbols) {
  29.692 +  {
  29.693 +    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  29.694 +                     _g1h->workers()->active_workers() : 1);
  29.695 +    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
  29.696 +    if (G1CollectedHeap::use_parallel_gc_threads()) {
  29.697 +      set_par_threads(n_workers);
  29.698 +      workers()->run_task(&g1_unlink_task);
  29.699 +      set_par_threads(0);
  29.700 +    } else {
  29.701 +      g1_unlink_task.work(0);
  29.702 +    }
  29.703    }
  29.704  
  29.705    if (G1StringDedup::is_enabled()) {
  29.706 @@ -5615,6 +5972,10 @@
  29.707  
  29.708    {
  29.709      StrongRootsScope srs(this);
  29.710 +    // InitialMark needs claim bits to keep track of the marked-through CLDs.
  29.711 +    if (g1_policy()->during_initial_mark_pause()) {
  29.712 +      ClassLoaderDataGraph::clear_claimed_marks();
  29.713 +    }
  29.714  
  29.715      if (G1CollectedHeap::use_parallel_gc_threads()) {
  29.716        // The individual threads will set their evac-failure closures.
  29.717 @@ -6566,106 +6927,6 @@
  29.718    g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
  29.719  }
  29.720  
  29.721 -// Mark all the code roots that point into regions *not* in the
  29.722 -// collection set.
  29.723 -//
  29.724 -// Note we do not want to use a "marking" CodeBlobToOopClosure while
  29.725 -// walking the the code roots lists of regions not in the collection
  29.726 -// set. Suppose we have an nmethod (M) that points to objects in two
  29.727 -// separate regions - one in the collection set (R1) and one not (R2).
  29.728 -// Using a "marking" CodeBlobToOopClosure here would result in "marking"
  29.729 -// nmethod M when walking the code roots for R1. When we come to scan
  29.730 -// the code roots for R2, we would see that M is already marked and it
  29.731 -// would be skipped and the objects in R2 that are referenced from M
  29.732 -// would not be evacuated.
  29.733 -
  29.734 -class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  29.735 -
  29.736 -  class MarkStrongCodeRootOopClosure: public OopClosure {
  29.737 -    ConcurrentMark* _cm;
  29.738 -    HeapRegion* _hr;
  29.739 -    uint _worker_id;
  29.740 -
  29.741 -    template <class T> void do_oop_work(T* p) {
  29.742 -      T heap_oop = oopDesc::load_heap_oop(p);
  29.743 -      if (!oopDesc::is_null(heap_oop)) {
  29.744 -        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  29.745 -        // Only mark objects in the region (which is assumed
  29.746 -        // to be not in the collection set).
  29.747 -        if (_hr->is_in(obj)) {
  29.748 -          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  29.749 -        }
  29.750 -      }
  29.751 -    }
  29.752 -
  29.753 -  public:
  29.754 -    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
  29.755 -      _cm(cm), _hr(hr), _worker_id(worker_id) {
  29.756 -      assert(!_hr->in_collection_set(), "sanity");
  29.757 -    }
  29.758 -
  29.759 -    void do_oop(narrowOop* p) { do_oop_work(p); }
  29.760 -    void do_oop(oop* p)       { do_oop_work(p); }
  29.761 -  };
  29.762 -
  29.763 -  MarkStrongCodeRootOopClosure _oop_cl;
  29.764 -
  29.765 -public:
  29.766 -  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
  29.767 -    _oop_cl(cm, hr, worker_id) {}
  29.768 -
  29.769 -  void do_code_blob(CodeBlob* cb) {
  29.770 -    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
  29.771 -    if (nm != NULL) {
  29.772 -      nm->oops_do(&_oop_cl);
  29.773 -    }
  29.774 -  }
  29.775 -};
  29.776 -
  29.777 -class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
  29.778 -  G1CollectedHeap* _g1h;
  29.779 -  uint _worker_id;
  29.780 -
  29.781 -public:
  29.782 -  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
  29.783 -    _g1h(g1h), _worker_id(worker_id) {}
  29.784 -
  29.785 -  bool doHeapRegion(HeapRegion *hr) {
  29.786 -    HeapRegionRemSet* hrrs = hr->rem_set();
  29.787 -    if (hr->continuesHumongous()) {
  29.788 -      // Code roots should never be attached to a continuation of a humongous region
  29.789 -      assert(hrrs->strong_code_roots_list_length() == 0,
  29.790 -             err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
  29.791 -                     " starting at "HR_FORMAT", but has "SIZE_FORMAT,
  29.792 -                     HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
  29.793 -                     hrrs->strong_code_roots_list_length()));
  29.794 -      return false;
  29.795 -    }
  29.796 -
  29.797 -    if (hr->in_collection_set()) {
  29.798 -      // Don't mark code roots into regions in the collection set here.
  29.799 -      // They will be marked when we scan them.
  29.800 -      return false;
  29.801 -    }
  29.802 -
  29.803 -    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
  29.804 -    hr->strong_code_roots_do(&cb_cl);
  29.805 -    return false;
  29.806 -  }
  29.807 -};
  29.808 -
  29.809 -void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
  29.810 -  MarkStrongCodeRootsHRClosure cl(this, worker_id);
  29.811 -  if (G1CollectedHeap::use_parallel_gc_threads()) {
  29.812 -    heap_region_par_iterate_chunked(&cl,
  29.813 -                                    worker_id,
  29.814 -                                    workers()->active_workers(),
  29.815 -                                    HeapRegion::ParMarkRootClaimValue);
  29.816 -  } else {
  29.817 -    heap_region_iterate(&cl);
  29.818 -  }
  29.819 -}
  29.820 -
  29.821  class RebuildStrongCodeRootClosure: public CodeBlobClosure {
  29.822    G1CollectedHeap* _g1h;
  29.823  
    30.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jul 01 09:03:55 2014 +0200
    30.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jul 07 10:12:40 2014 +0200
    30.3 @@ -210,6 +210,7 @@
    30.4  class RefineCardTableEntryClosure;
    30.5  
    30.6  class G1CollectedHeap : public SharedHeap {
    30.7 +  friend class VM_CollectForMetadataAllocation;
    30.8    friend class VM_G1CollectForAllocation;
    30.9    friend class VM_G1CollectFull;
   30.10    friend class VM_G1IncCollectionPause;
   30.11 @@ -219,7 +220,7 @@
   30.12    friend class OldGCAllocRegion;
   30.13  
   30.14    // Closures used in implementation.
   30.15 -  template <G1Barrier barrier, bool do_mark_object>
   30.16 +  template <G1Barrier barrier, G1Mark do_mark_object>
   30.17    friend class G1ParCopyClosure;
   30.18    friend class G1IsAliveClosure;
   30.19    friend class G1EvacuateFollowersClosure;
   30.20 @@ -346,6 +347,9 @@
   30.21    // It initializes the GC alloc regions at the start of a GC.
   30.22    void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
   30.23  
    30.24 +  // Set up the retained old gc alloc region as the current old gc alloc region.
   30.25 +  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
   30.26 +
   30.27    // It releases the GC alloc regions at the end of a GC.
   30.28    void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
   30.29  
   30.30 @@ -827,12 +831,13 @@
   30.31    // param is for use with parallel roots processing, and should be
   30.32    // the "i" of the calling parallel worker thread's work(i) function.
   30.33    // In the sequential case this param will be ignored.
   30.34 -  void g1_process_strong_roots(bool is_scavenging,
   30.35 -                               ScanningOption so,
   30.36 -                               OopClosure* scan_non_heap_roots,
   30.37 -                               OopsInHeapRegionClosure* scan_rs,
   30.38 -                               G1KlassScanClosure* scan_klasses,
   30.39 -                               uint worker_i);
   30.40 +  void g1_process_roots(OopClosure* scan_non_heap_roots,
   30.41 +                        OopClosure* scan_non_heap_weak_roots,
   30.42 +                        OopsInHeapRegionClosure* scan_rs,
   30.43 +                        CLDClosure* scan_strong_clds,
   30.44 +                        CLDClosure* scan_weak_clds,
   30.45 +                        CodeBlobClosure* scan_strong_code,
   30.46 +                        uint worker_i);
   30.47  
   30.48    // Notifies all the necessary spaces that the committed space has
   30.49    // been updated (either expanded or shrunk). It should be called
   30.50 @@ -1025,7 +1030,7 @@
   30.51    // of G1CollectedHeap::_gc_time_stamp.
   30.52    unsigned int* _worker_cset_start_region_time_stamp;
   30.53  
   30.54 -  enum G1H_process_strong_roots_tasks {
   30.55 +  enum G1H_process_roots_tasks {
   30.56      G1H_PS_filter_satb_buffers,
   30.57      G1H_PS_refProcessor_oops_do,
   30.58      // Leave this one last.
   30.59 @@ -1596,10 +1601,6 @@
   30.60    // Free up superfluous code root memory.
   30.61    void purge_code_root_memory();
   30.62  
   30.63 -  // During an initial mark pause, mark all the code roots that
   30.64 -  // point into regions *not* in the collection set.
   30.65 -  void mark_strong_code_roots(uint worker_id);
   30.66 -
    30.67    // Rebuild the strong code root lists for each region
   30.68    // after a full GC
   30.69    void rebuild_strong_code_roots();
   30.70 @@ -1608,6 +1609,9 @@
   30.71    // in symbol table, possibly in parallel.
   30.72    void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
   30.73  
   30.74 +  // Parallel phase of unloading/cleaning after G1 concurrent mark.
   30.75 +  void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
   30.76 +
   30.77    // Redirty logged cards in the refinement queue.
   30.78    void redirty_logged_cards();
   30.79    // Verification
    31.1 --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Jul 01 09:03:55 2014 +0200
    31.2 +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Mon Jul 07 10:12:40 2014 +0200
    31.3 @@ -71,6 +71,9 @@
    31.4    bool _during_initial_mark;
    31.5    bool _during_conc_mark;
    31.6    uint _worker_id;
    31.7 +  HeapWord* _end_of_last_gap;
    31.8 +  HeapWord* _last_gap_threshold;
    31.9 +  HeapWord* _last_obj_threshold;
   31.10  
   31.11  public:
   31.12    RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
   31.13 @@ -83,7 +86,10 @@
   31.14      _update_rset_cl(update_rset_cl),
   31.15      _during_initial_mark(during_initial_mark),
   31.16      _during_conc_mark(during_conc_mark),
   31.17 -    _worker_id(worker_id) { }
   31.18 +    _worker_id(worker_id),
   31.19 +    _end_of_last_gap(hr->bottom()),
   31.20 +    _last_gap_threshold(hr->bottom()),
   31.21 +    _last_obj_threshold(hr->bottom()) { }
   31.22  
   31.23    size_t marked_bytes() { return _marked_bytes; }
   31.24  
   31.25 @@ -107,7 +113,12 @@
   31.26      HeapWord* obj_addr = (HeapWord*) obj;
   31.27      assert(_hr->is_in(obj_addr), "sanity");
   31.28      size_t obj_size = obj->size();
   31.29 -    _hr->update_bot_for_object(obj_addr, obj_size);
   31.30 +    HeapWord* obj_end = obj_addr + obj_size;
   31.31 +
   31.32 +    if (_end_of_last_gap != obj_addr) {
   31.33 +      // there was a gap before obj_addr
   31.34 +      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
   31.35 +    }
   31.36  
   31.37      if (obj->is_forwarded() && obj->forwardee() == obj) {
   31.38        // The object failed to move.
   31.39 @@ -115,7 +126,9 @@
   31.40        // We consider all objects that we find self-forwarded to be
   31.41        // live. What we'll do is that we'll update the prev marking
   31.42        // info so that they are all under PTAMS and explicitly marked.
   31.43 -      _cm->markPrev(obj);
   31.44 +      if (!_cm->isPrevMarked(obj)) {
   31.45 +        _cm->markPrev(obj);
   31.46 +      }
   31.47        if (_during_initial_mark) {
   31.48          // For the next marking info we'll only mark the
   31.49          // self-forwarded objects explicitly if we are during
   31.50 @@ -145,13 +158,18 @@
   31.51        // remembered set entries missing given that we skipped cards on
   31.52        // the collection set. So, we'll recreate such entries now.
   31.53        obj->oop_iterate(_update_rset_cl);
   31.54 -      assert(_cm->isPrevMarked(obj), "Should be marked!");
   31.55      } else {
   31.56 +
   31.57        // The object has been either evacuated or is dead. Fill it with a
   31.58        // dummy object.
   31.59 -      MemRegion mr((HeapWord*) obj, obj_size);
   31.60 +      MemRegion mr(obj_addr, obj_size);
   31.61        CollectedHeap::fill_with_object(mr);
   31.62 +
    31.63 +      // Clear the prev bitmap for all dead objects that we skipped when iterating over the region
   31.64 +      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
   31.65      }
   31.66 +    _end_of_last_gap = obj_end;
   31.67 +    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
   31.68    }
   31.69  };
   31.70  
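The change above replaces the old "wipe the whole prev bitmap for the region" approach (removed in the next hunk) with incremental clearing: the closure remembers _end_of_last_gap and, for each dead or evacuated object, clears the prev bitmap from there through the end of that object. A simplified standalone model of that sweep over a plain bit vector (illustrative only; the real code uses CMBitMap and also updates BOT thresholds via cross_threshold):

    #include <cstddef>
    #include <vector>

    struct Obj {
      std::size_t addr;      // start word index within the region
      std::size_t size;      // size in words
      bool        retained;  // self-forwarded, i.e. kept live in place
    };

    // Clears mark bits over spans that no longer hold live objects.
    static void sweep_prev_bitmap(std::vector<bool>& prev_bitmap,
                                  const std::vector<Obj>& objs_in_address_order) {
      std::size_t end_of_last_gap = 0;
      for (const Obj& o : objs_in_address_order) {
        std::size_t obj_end = o.addr + o.size;
        if (!o.retained) {
          // Clear everything from the end of the previous object through
          // the end of this dead object, including any gap in between.
          for (std::size_t i = end_of_last_gap; i < obj_end; ++i) {
            prev_bitmap[i] = false;
          }
        }
        end_of_last_gap = obj_end;
      }
    }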
   31.71 @@ -182,13 +200,6 @@
   31.72                                              during_conc_mark,
   31.73                                              _worker_id);
   31.74  
   31.75 -        MemRegion mr(hr->bottom(), hr->end());
   31.76 -        // We'll recreate the prev marking info so we'll first clear
   31.77 -        // the prev bitmap range for this region. We never mark any
   31.78 -        // CSet objects explicitly so the next bitmap range should be
   31.79 -        // cleared anyway.
   31.80 -        _cm->clearRangePrevBitmap(mr);
   31.81 -
   31.82          hr->note_self_forwarding_removal_start(during_initial_mark,
   31.83                                                 during_conc_mark);
   31.84  
    32.1 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Tue Jul 01 09:03:55 2014 +0200
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Mon Jul 07 10:12:40 2014 +0200
    32.3 @@ -166,7 +166,6 @@
    32.4    _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
    32.5    _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
    32.6    _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
    32.7 -  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
    32.8    _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
    32.9    _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   32.10    _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
   32.11 @@ -193,7 +192,6 @@
   32.12    _last_update_rs_processed_buffers.reset();
   32.13    _last_scan_rs_times_ms.reset();
   32.14    _last_strong_code_root_scan_times_ms.reset();
   32.15 -  _last_strong_code_root_mark_times_ms.reset();
   32.16    _last_obj_copy_times_ms.reset();
   32.17    _last_termination_times_ms.reset();
   32.18    _last_termination_attempts.reset();
   32.19 @@ -214,7 +212,6 @@
   32.20    _last_update_rs_processed_buffers.verify();
   32.21    _last_scan_rs_times_ms.verify();
   32.22    _last_strong_code_root_scan_times_ms.verify();
   32.23 -  _last_strong_code_root_mark_times_ms.verify();
   32.24    _last_obj_copy_times_ms.verify();
   32.25    _last_termination_times_ms.verify();
   32.26    _last_termination_attempts.verify();
   32.27 @@ -229,7 +226,6 @@
   32.28                                 _last_update_rs_times_ms.get(i) +
   32.29                                 _last_scan_rs_times_ms.get(i) +
   32.30                                 _last_strong_code_root_scan_times_ms.get(i) +
   32.31 -                               _last_strong_code_root_mark_times_ms.get(i) +
   32.32                                 _last_obj_copy_times_ms.get(i) +
   32.33                                 _last_termination_times_ms.get(i);
   32.34  
   32.35 @@ -301,9 +297,6 @@
   32.36      if (_last_satb_filtering_times_ms.sum() > 0.0) {
   32.37        _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
   32.38      }
   32.39 -    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
   32.40 -     _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
   32.41 -    }
   32.42      _last_update_rs_times_ms.print(2, "Update RS (ms)");
   32.43        _last_update_rs_processed_buffers.print(3, "Processed Buffers");
   32.44      _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
   32.45 @@ -321,9 +314,6 @@
   32.46      if (_last_satb_filtering_times_ms.sum() > 0.0) {
   32.47        _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
   32.48      }
   32.49 -    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
   32.50 -      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
   32.51 -    }
   32.52      _last_update_rs_times_ms.print(1, "Update RS (ms)");
   32.53        _last_update_rs_processed_buffers.print(2, "Processed Buffers");
   32.54      _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
    33.1 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Tue Jul 01 09:03:55 2014 +0200
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Mon Jul 07 10:12:40 2014 +0200
    33.3 @@ -120,7 +120,6 @@
    33.4    WorkerDataArray<int>    _last_update_rs_processed_buffers;
    33.5    WorkerDataArray<double> _last_scan_rs_times_ms;
    33.6    WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
    33.7 -  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
    33.8    WorkerDataArray<double> _last_obj_copy_times_ms;
    33.9    WorkerDataArray<double> _last_termination_times_ms;
   33.10    WorkerDataArray<size_t> _last_termination_attempts;
   33.11 @@ -199,10 +198,6 @@
   33.12      _last_strong_code_root_scan_times_ms.set(worker_i, ms);
   33.13    }
   33.14  
   33.15 -  void record_strong_code_root_mark_time(uint worker_i, double ms) {
   33.16 -    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
   33.17 -  }
   33.18 -
   33.19    void record_obj_copy_time(uint worker_i, double ms) {
   33.20      _last_obj_copy_times_ms.set(worker_i, ms);
   33.21    }
   33.22 @@ -369,10 +364,6 @@
   33.23      return _last_strong_code_root_scan_times_ms.average();
   33.24    }
   33.25  
   33.26 -  double average_last_strong_code_root_mark_time(){
   33.27 -    return _last_strong_code_root_mark_times_ms.average();
   33.28 -  }
   33.29 -
   33.30    double average_last_obj_copy_time() {
   33.31      return _last_obj_copy_times_ms.average();
   33.32    }
    34.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Jul 01 09:03:55 2014 +0200
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Jul 07 10:12:40 2014 +0200
    34.3 @@ -128,13 +128,15 @@
    34.4  
    34.5    SharedHeap* sh = SharedHeap::heap();
    34.6  
    34.7 -  // Need cleared claim bits for the strong roots processing
    34.8 +  // Need cleared claim bits for the roots processing
    34.9    ClassLoaderDataGraph::clear_claimed_marks();
   34.10  
   34.11 -  sh->process_strong_roots(true,  // activate StrongRootsScope
   34.12 -                           SharedHeap::SO_SystemClasses,
   34.13 +  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
   34.14 +  sh->process_strong_roots(true,   // activate StrongRootsScope
   34.15 +                           SharedHeap::SO_None,
   34.16                             &GenMarkSweep::follow_root_closure,
   34.17 -                           &GenMarkSweep::follow_klass_closure);
   34.18 +                           &GenMarkSweep::follow_cld_closure,
   34.19 +                           &follow_code_closure);
   34.20  
   34.21    // Process reference objects found during marking
   34.22    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
   34.23 @@ -303,13 +305,15 @@
   34.24  
   34.25    SharedHeap* sh = SharedHeap::heap();
   34.26  
   34.27 -  // Need cleared claim bits for the strong roots processing
   34.28 +  // Need cleared claim bits for the roots processing
   34.29    ClassLoaderDataGraph::clear_claimed_marks();
   34.30  
   34.31 -  sh->process_strong_roots(true,  // activate StrongRootsScope
   34.32 -                           SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
   34.33 -                           &GenMarkSweep::adjust_pointer_closure,
   34.34 -                           &GenMarkSweep::adjust_klass_closure);
   34.35 +  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
   34.36 +  sh->process_all_roots(true,  // activate StrongRootsScope
   34.37 +                        SharedHeap::SO_AllCodeCache,
   34.38 +                        &GenMarkSweep::adjust_pointer_closure,
   34.39 +                        &GenMarkSweep::adjust_cld_closure,
   34.40 +                        &adjust_code_closure);
   34.41  
   34.42    assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
   34.43    g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
    35.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Jul 01 09:03:55 2014 +0200
    35.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Jul 07 10:12:40 2014 +0200
    35.3 @@ -25,6 +25,8 @@
    35.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
    35.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
    35.6  
    35.7 +#include "memory/iterator.hpp"
    35.8 +
    35.9  class HeapRegion;
   35.10  class G1CollectedHeap;
   35.11  class G1RemSet;
   35.12 @@ -106,7 +108,7 @@
   35.13    template <class T> void do_klass_barrier(T* p, oop new_obj);
   35.14  };
   35.15  
   35.16 -template <G1Barrier barrier, bool do_mark_object>
   35.17 +template <G1Barrier barrier, G1Mark do_mark_object>
   35.18  class G1ParCopyClosure : public G1ParCopyHelper {
   35.19  private:
   35.20    template <class T> void do_oop_work(T* p);
   35.21 @@ -121,19 +123,19 @@
   35.22    template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   35.23    virtual void do_oop(oop* p)       { do_oop_nv(p); }
   35.24    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
   35.25 +
   35.26 +  G1CollectedHeap*      g1()  { return _g1; };
   35.27 +  G1ParScanThreadState* pss() { return _par_scan_state; }
   35.28 +  ReferenceProcessor*   rp()  { return _ref_processor; };
   35.29  };
   35.30  
   35.31 -typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
   35.32 -typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
   35.33 -
   35.34 -
   35.35 -typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
   35.36 -typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
   35.37 -
   35.38 +typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
   35.39 +typedef G1ParCopyClosure<G1BarrierNone,  G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
   35.40 +typedef G1ParCopyClosure<G1BarrierNone,  G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
   35.41  // We use a separate closure to handle references during evacuation
   35.42  // failure processing.
   35.43  
   35.44 -typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
   35.45 +typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
   35.46  
   35.47  class FilterIntoCSClosure: public ExtendedOopClosure {
   35.48    G1CollectedHeap* _g1;
   35.49 @@ -164,10 +166,11 @@
   35.50  };
   35.51  
   35.52  // Closure for iterating over object fields during concurrent marking
   35.53 -class G1CMOopClosure : public ExtendedOopClosure {
   35.54 +class G1CMOopClosure : public MetadataAwareOopClosure {
   35.55 +protected:
   35.56 +  ConcurrentMark*    _cm;
   35.57  private:
   35.58    G1CollectedHeap*   _g1h;
   35.59 -  ConcurrentMark*    _cm;
   35.60    CMTask*            _task;
   35.61  public:
   35.62    G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
   35.63 @@ -177,7 +180,7 @@
   35.64  };
   35.65  
   35.66  // Closure to scan the root regions during concurrent marking
   35.67 -class G1RootRegionScanClosure : public ExtendedOopClosure {
   35.68 +class G1RootRegionScanClosure : public MetadataAwareOopClosure {
   35.69  private:
   35.70    G1CollectedHeap* _g1h;
   35.71    ConcurrentMark*  _cm;
    36.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Jul 01 09:03:55 2014 +0200
    36.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Mon Jul 07 10:12:40 2014 +0200
    36.3 @@ -32,6 +32,7 @@
    36.4  #include "gc_implementation/g1/g1RemSet.hpp"
    36.5  #include "gc_implementation/g1/g1RemSet.inline.hpp"
    36.6  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    36.7 +#include "memory/iterator.inline.hpp"
    36.8  #include "runtime/prefetch.inline.hpp"
    36.9  
   36.10  /*
   36.11 @@ -108,10 +109,6 @@
   36.12  
   36.13  template <class T>
   36.14  inline void G1CMOopClosure::do_oop_nv(T* p) {
   36.15 -  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
   36.16 -  assert(!_g1h->is_on_master_free_list(
   36.17 -                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
   36.18 -
   36.19    oop obj = oopDesc::load_decode_heap_oop(p);
   36.20    if (_cm->verbose_high()) {
   36.21      gclog_or_tty->print_cr("[%u] we're looking at location "
    37.1 --- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Tue Jul 01 09:03:55 2014 +0200
    37.2 +++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Mon Jul 07 10:12:40 2014 +0200
    37.3 @@ -30,14 +30,21 @@
    37.4  // non-virtually, using a mechanism defined in this file.  Extend these
    37.5  // macros in the obvious way to add specializations for new closures.
    37.6  
    37.7 -// Forward declarations.
    37.8  enum G1Barrier {
    37.9    G1BarrierNone,
   37.10    G1BarrierEvac,
   37.11    G1BarrierKlass
   37.12  };
   37.13  
   37.14 -template<G1Barrier barrier, bool do_mark_object>
   37.15 +enum G1Mark {
   37.16 +  G1MarkNone,
   37.17 +  G1MarkFromRoot,
   37.18 +  G1MarkPromotedFromRoot
   37.19 +};
   37.20 +
   37.21 +// Forward declarations.
   37.22 +
   37.23 +template<G1Barrier barrier, G1Mark do_mark_object>
   37.24  class G1ParCopyClosure;
   37.25  
   37.26  class G1ParScanClosure;
    38.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Jul 01 09:03:55 2014 +0200
    38.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Jul 07 10:12:40 2014 +0200
    38.3 @@ -399,7 +399,6 @@
    38.4    // We always recreate the prev marking info and we'll explicitly
    38.5    // mark all objects we find to be self-forwarded on the prev
    38.6    // bitmap. So all objects need to be below PTAMS.
    38.7 -  _prev_top_at_mark_start = top();
    38.8    _prev_marked_bytes = 0;
    38.9  
   38.10    if (during_initial_mark) {
   38.11 @@ -423,6 +422,7 @@
   38.12    assert(0 <= marked_bytes && marked_bytes <= used(),
   38.13           err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
   38.14                   marked_bytes, used()));
   38.15 +  _prev_top_at_mark_start = top();
   38.16    _prev_marked_bytes = marked_bytes;
   38.17  }
   38.18  
   38.19 @@ -907,7 +907,8 @@
   38.20      size_t obj_size = block_size(p);
   38.21      object_num += 1;
   38.22  
   38.23 -    if (is_humongous != g1->isHumongous(obj_size)) {
   38.24 +    if (is_humongous != g1->isHumongous(obj_size) &&
    38.25 +        !g1->is_obj_dead(obj, this)) { // Dead objects may have a larger block_size, since a dead block can span several objects.
   38.26        gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
   38.27                               SIZE_FORMAT" words) in a %shumongous region",
   38.28                               p, g1->isHumongous(obj_size) ? "" : "non-",
   38.29 @@ -918,7 +919,9 @@
   38.30  
   38.31      // If it returns false, verify_for_object() will output the
    38.32      // appropriate message.
   38.33 -    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
   38.34 +    if (do_bot_verify &&
   38.35 +        !g1->is_obj_dead(obj, this) &&
   38.36 +        !_offsets.verify_for_object(p, obj_size)) {
   38.37        *failures = true;
   38.38        return;
   38.39      }
   38.40 @@ -926,7 +929,10 @@
   38.41      if (!g1->is_obj_dead_cond(obj, this, vo)) {
   38.42        if (obj->is_oop()) {
   38.43          Klass* klass = obj->klass();
   38.44 -        if (!klass->is_metaspace_object()) {
   38.45 +        bool is_metaspace_object = Metaspace::contains(klass) ||
   38.46 +                                   (vo == VerifyOption_G1UsePrevMarking &&
   38.47 +                                   ClassLoaderDataGraph::unload_list_contains(klass));
   38.48 +        if (!is_metaspace_object) {
   38.49            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
   38.50                                   "not metadata", klass, (void *)obj);
   38.51            *failures = true;
    39.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Jul 01 09:03:55 2014 +0200
    39.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Jul 07 10:12:40 2014 +0200
    39.3 @@ -247,11 +247,9 @@
    39.4    bool _evacuation_failed;
    39.5  
     39.6    // A heap region may be a member of one of a number of special subsets, each
    39.7 -  // represented as linked lists through the field below.  Currently, these
    39.8 -  // sets include:
    39.9 +  // represented as linked lists through the field below.  Currently, there
   39.10 +  // is only one set:
   39.11    //   The collection set.
   39.12 -  //   The set of allocation regions used in a collection pause.
   39.13 -  //   Spaces that may contain gray objects.
   39.14    HeapRegion* _next_in_special_set;
   39.15  
   39.16    // next region in the young "generation" region set
    40.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Tue Jul 01 09:03:55 2014 +0200
    40.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Mon Jul 07 10:12:40 2014 +0200
    40.3 @@ -93,18 +93,27 @@
    40.4  
    40.5  inline bool
    40.6  HeapRegion::block_is_obj(const HeapWord* p) const {
    40.7 -  return p < top();
    40.8 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
    40.9 +  return !g1h->is_obj_dead(oop(p), this);
   40.10  }
   40.11  
   40.12  inline size_t
   40.13  HeapRegion::block_size(const HeapWord *addr) const {
   40.14 -  const HeapWord* current_top = top();
   40.15 -  if (addr < current_top) {
   40.16 -    return oop(addr)->size();
   40.17 -  } else {
   40.18 -    assert(addr == current_top, "just checking");
    40.19 +  // Dead objects in old regions may have dead classes,
    40.20 +  // so we need to find the next live object in some other
    40.21 +  // manner than by asking the oop for its size.
   40.22 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   40.23 +  if (g1h->is_obj_dead(oop(addr), this)) {
   40.24 +    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
   40.25 +        getNextMarkedWordAddress(addr, prev_top_at_mark_start());
   40.26 +
   40.27 +    assert(next > addr, "must get the next live object");
   40.28 +
   40.29 +    return pointer_delta(next, addr);
   40.30 +  } else if (addr == top()) {
   40.31      return pointer_delta(end(), addr);
   40.32    }
   40.33 +  return oop(addr)->size();
   40.34  }
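With class unloading, a dead object's klass may itself have been freed, so block_size() can no longer call oop(addr)->size() for dead blocks; it instead measures the distance to the next live object recorded in the prev mark bitmap. A standalone sketch of that "size of a dead block = distance to the next set bit" idea over a plain bit vector (illustrative; the real code uses CMBitMap::getNextMarkedWordAddress):

    #include <cstddef>
    #include <vector>

    // prev_bitmap[i] is true iff a live object starts at word index i.
    // Returns the size in words of the dead block starting at addr,
    // bounded by limit (prev_top_at_mark_start in the real code).
    static std::size_t dead_block_size(const std::vector<bool>& prev_bitmap,
                                       std::size_t addr, std::size_t limit) {
      std::size_t next = addr + 1;
      while (next < limit && !prev_bitmap[next]) {
        ++next;  // scan forward to the next live object start
      }
      return next - addr;  // the dead block spans everything up to it
    }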
   40.35  
   40.36  inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
    41.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Jul 01 09:03:55 2014 +0200
    41.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Jul 07 10:12:40 2014 +0200
    41.3 @@ -929,7 +929,10 @@
    41.4  
    41.5  void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
    41.6    assert(nm != NULL, "sanity");
    41.7 -  _code_roots.remove(nm);
    41.8 +  assert_locked_or_safepoint(CodeCache_lock);
    41.9 +
   41.10 +  _code_roots.remove_lock_free(nm);
   41.11 +
   41.12    // Check that there were no duplicates
   41.13    guarantee(!_code_roots.contains(nm), "duplicate entry found");
   41.14  }
    42.1 --- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Tue Jul 01 09:03:55 2014 +0200
    42.2 +++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon Jul 07 10:12:40 2014 +0200
    42.3 @@ -285,37 +285,6 @@
    42.4    _par_closures[i] = par_closure;
    42.5  }
    42.6  
    42.7 -void SATBMarkQueueSet::iterate_closure_all_threads() {
    42.8 -  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    42.9 -    t->satb_mark_queue().apply_closure_and_empty(_closure);
   42.10 -  }
   42.11 -  shared_satb_queue()->apply_closure_and_empty(_closure);
   42.12 -}
   42.13 -
   42.14 -void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
   42.15 -  SharedHeap* sh = SharedHeap::heap();
   42.16 -  int parity = sh->strong_roots_parity();
   42.17 -
   42.18 -  for(JavaThread* t = Threads::first(); t; t = t->next()) {
   42.19 -    if (t->claim_oops_do(true, parity)) {
   42.20 -      t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
   42.21 -    }
   42.22 -  }
   42.23 -
   42.24 -  // We also need to claim the VMThread so that its parity is updated
   42.25 -  // otherwise the next call to Thread::possibly_parallel_oops_do inside
   42.26 -  // a StrongRootsScope might skip the VMThread because it has a stale
   42.27 -  // parity that matches the parity set by the StrongRootsScope
   42.28 -  //
   42.29 -  // Whichever worker succeeds in claiming the VMThread gets to do
   42.30 -  // the shared queue.
   42.31 -
   42.32 -  VMThread* vmt = VMThread::vm_thread();
   42.33 -  if (vmt->claim_oops_do(true, parity)) {
   42.34 -    shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
   42.35 -  }
   42.36 -}
   42.37 -
   42.38  bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
   42.39                                                                uint worker) {
   42.40    BufferNode* nd = NULL;
    43.1 --- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Tue Jul 01 09:03:55 2014 +0200
    43.2 +++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Mon Jul 07 10:12:40 2014 +0200
    43.3 @@ -33,7 +33,9 @@
    43.4  
    43.5  // A ptrQueue whose elements are "oops", pointers to object heads.
    43.6  class ObjPtrQueue: public PtrQueue {
    43.7 +  friend class Threads;
    43.8    friend class SATBMarkQueueSet;
    43.9 +  friend class G1RemarkThreadsClosure;
   43.10  
   43.11  private:
   43.12    // Filter out unwanted entries from the buffer.
   43.13 @@ -119,13 +121,6 @@
   43.14    // closures, one for each parallel GC thread.
   43.15    void set_par_closure(int i, ObjectClosure* closure);
   43.16  
   43.17 -  // Apply the registered closure to all entries on each
   43.18 -  // currently-active buffer and then empty the buffer. It should only
   43.19 -  // be called serially and at a safepoint.
   43.20 -  void iterate_closure_all_threads();
   43.21 -  // Parallel version of the above.
   43.22 -  void par_iterate_closure_all_threads(uint worker);
   43.23 -
   43.24    // If there exists some completed buffer, pop it, then apply the
   43.25    // registered closure to all its elements, and return true.  If no
   43.26    // completed buffers exist, return false.
    44.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jul 01 09:03:55 2014 +0200
    44.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Jul 07 10:12:40 2014 +0200
    44.3 @@ -613,18 +613,21 @@
    44.4  
    44.5    KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
    44.6                                        gch->rem_set()->klass_rem_set());
    44.7 -
    44.8 -  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
    44.9 +  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
   44.10 +                                           &par_scan_state.to_space_root_closure(),
   44.11 +                                           false);
   44.12  
   44.13    par_scan_state.start_strong_roots();
   44.14 -  gch->gen_process_strong_roots(_gen->level(),
   44.15 -                                true,  // Process younger gens, if any,
   44.16 -                                       // as strong roots.
   44.17 -                                false, // no scope; this is parallel code
   44.18 -                                SharedHeap::ScanningOption(so),
   44.19 -                                &par_scan_state.to_space_root_closure(),
   44.20 -                                &par_scan_state.older_gen_closure(),
   44.21 -                                &klass_scan_closure);
   44.22 +  gch->gen_process_roots(_gen->level(),
   44.23 +                         true,  // Process younger gens, if any,
   44.24 +                                // as strong roots.
   44.25 +                         false, // no scope; this is parallel code
   44.26 +                         SharedHeap::SO_ScavengeCodeCache,
   44.27 +                         GenCollectedHeap::StrongAndWeakRoots,
   44.28 +                         &par_scan_state.to_space_root_closure(),
   44.29 +                         &par_scan_state.older_gen_closure(),
   44.30 +                         &cld_scan_closure);
   44.31 +
   44.32    par_scan_state.end_strong_roots();
   44.33  
   44.34    // "evacuate followers".
    45.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Tue Jul 01 09:03:55 2014 +0200
    45.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Jul 07 10:12:40 2014 +0200
    45.3 @@ -69,7 +69,7 @@
    45.4    ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
    45.5    ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
    45.6    ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
    45.7 -  // One of these two will be passed to process_strong_roots, which will
    45.8 +  // One of these two will be passed to process_roots, which will
    45.9    // set its generation.  The first is for two-gen configs where the
   45.10    // old gen collects the perm gen; the second is for arbitrary configs.
   45.11    // The second isn't used right now (it used to be used for the train, an
    46.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Jul 01 09:03:55 2014 +0200
    46.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Mon Jul 07 10:12:40 2014 +0200
    46.3 @@ -59,7 +59,7 @@
    46.4  
    46.5    PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
    46.6    CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
    46.7 -  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
    46.8 +  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
    46.9  
   46.10    if (_java_thread != NULL)
   46.11      _java_thread->oops_do(
   46.12 @@ -100,7 +100,7 @@
   46.13      case threads:
   46.14      {
   46.15        ResourceMark rm;
   46.16 -      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
   46.17 +      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
   46.18        CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
   46.19        Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
   46.20      }
    47.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Jul 01 09:03:55 2014 +0200
    47.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Jul 07 10:12:40 2014 +0200
    47.3 @@ -528,14 +528,14 @@
    47.4      Universe::oops_do(mark_and_push_closure());
    47.5      JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    47.6      CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    47.7 -    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    47.8 +    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    47.9      Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
   47.10      ObjectSynchronizer::oops_do(mark_and_push_closure());
   47.11      FlatProfiler::oops_do(mark_and_push_closure());
   47.12      Management::oops_do(mark_and_push_closure());
   47.13      JvmtiExport::oops_do(mark_and_push_closure());
   47.14      SystemDictionary::always_strong_oops_do(mark_and_push_closure());
   47.15 -    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
   47.16 +    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
   47.17      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
   47.18      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
   47.19    }
   47.20 @@ -625,16 +625,16 @@
   47.21    FlatProfiler::oops_do(adjust_pointer_closure());
   47.22    Management::oops_do(adjust_pointer_closure());
   47.23    JvmtiExport::oops_do(adjust_pointer_closure());
   47.24 -  // SO_AllClasses
   47.25    SystemDictionary::oops_do(adjust_pointer_closure());
   47.26 -  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
   47.27 +  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
   47.28  
   47.29    // Now adjust pointers in remaining weak roots.  (All of which should
   47.30    // have been cleared if they pointed to non-surviving objects.)
   47.31    // Global (weak) JNI handles
   47.32    JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
   47.33  
   47.34 -  CodeCache::oops_do(adjust_pointer_closure());
   47.35 +  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   47.36 +  CodeCache::blobs_do(&adjust_from_blobs);
   47.37    StringTable::oops_do(adjust_pointer_closure());
   47.38    ref_processor()->weak_oops_do(adjust_pointer_closure());
   47.39    PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
    48.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Tue Jul 01 09:03:55 2014 +0200
    48.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Mon Jul 07 10:12:40 2014 +0200
    48.3 @@ -40,11 +40,11 @@
    48.4    static CollectorCounters*  _counters;
    48.5  
    48.6    // Closure accessors
    48.7 -  static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
    48.8 -  static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
    48.9 -  static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
   48.10 -  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
   48.11 -  static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
   48.12 +  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
   48.13 +  static VoidClosure* follow_stack_closure()   { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
   48.14 +  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
   48.15 +  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
   48.16 +  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
   48.17    static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
   48.18  
   48.19   debug_only(public:)  // Used for PSParallelCompact debugging
    49.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jul 01 09:03:55 2014 +0200
    49.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Jul 07 10:12:40 2014 +0200
    49.3 @@ -2465,7 +2465,6 @@
    49.4    FlatProfiler::oops_do(adjust_pointer_closure());
    49.5    Management::oops_do(adjust_pointer_closure());
    49.6    JvmtiExport::oops_do(adjust_pointer_closure());
    49.7 -  // SO_AllClasses
    49.8    SystemDictionary::oops_do(adjust_pointer_closure());
    49.9    ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
   49.10  
   49.11 @@ -2474,7 +2473,8 @@
   49.12    // Global (weak) JNI handles
   49.13    JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
   49.14  
   49.15 -  CodeCache::oops_do(adjust_pointer_closure());
   49.16 +  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   49.17 +  CodeCache::blobs_do(&adjust_from_blobs);
   49.18    StringTable::oops_do(adjust_pointer_closure());
   49.19    ref_processor()->weak_oops_do(adjust_pointer_closure());
   49.20    // Roots were visited so references into the young gen in roots
    50.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Jul 01 09:03:55 2014 +0200
    50.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Mon Jul 07 10:12:40 2014 +0200
    50.3 @@ -100,7 +100,7 @@
    50.4  
    50.5      case code_cache:
    50.6        {
    50.7 -        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
    50.8 +        MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
    50.9          CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
   50.10        }
   50.11        break;
   50.12 @@ -123,7 +123,7 @@
   50.13    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   50.14    PSScavengeRootsClosure roots_closure(pm);
   50.15    CLDClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
   50.16 -  CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
   50.17 +  MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
   50.18  
   50.19    if (_java_thread != NULL)
   50.20      _java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
    51.1 --- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Jul 01 09:03:55 2014 +0200
    51.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Mon Jul 07 10:12:40 2014 +0200
    51.3 @@ -54,21 +54,14 @@
    51.4  void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
    51.5  
    51.6  MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
    51.7 -MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure;
    51.8 -MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure;
    51.9 +CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
   51.10 +CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
   51.11  
   51.12  void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
   51.13  void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
   51.14  
   51.15 -void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) {
   51.16 -  klass->oops_do(&MarkSweep::mark_and_push_closure);
   51.17 -}
   51.18 -void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) {
   51.19 -  klass->oops_do(&MarkSweep::adjust_pointer_closure);
   51.20 -}
   51.21 -
   51.22  void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
   51.23 -  cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true);
   51.24 +  MarkSweep::follow_cld_closure.do_cld(cld);
   51.25  }
   51.26  
   51.27  void MarkSweep::follow_stack() {
    52.1 --- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Jul 01 09:03:55 2014 +0200
    52.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Mon Jul 07 10:12:40 2014 +0200
    52.3 @@ -65,17 +65,6 @@
    52.4      virtual void do_oop(narrowOop* p);
    52.5    };
    52.6  
    52.7 -  // The one and only place to start following the classes.
    52.8 -  // Should only be applied to the ClassLoaderData klasses list.
    52.9 -  class FollowKlassClosure : public KlassClosure {
   52.10 -   public:
   52.11 -    void do_klass(Klass* klass);
   52.12 -  };
   52.13 -  class AdjustKlassClosure : public KlassClosure {
   52.14 -   public:
   52.15 -    void do_klass(Klass* klass);
   52.16 -  };
   52.17 -
   52.18    class FollowStackClosure: public VoidClosure {
   52.19     public:
   52.20      virtual void do_void();
   52.21 @@ -144,10 +133,10 @@
   52.22    static IsAliveClosure       is_alive;
   52.23    static FollowRootClosure    follow_root_closure;
   52.24    static MarkAndPushClosure   mark_and_push_closure;
   52.25 -  static FollowKlassClosure   follow_klass_closure;
   52.26    static FollowStackClosure   follow_stack_closure;
   52.27 +  static CLDToOopClosure      follow_cld_closure;
   52.28    static AdjustPointerClosure adjust_pointer_closure;
   52.29 -  static AdjustKlassClosure   adjust_klass_closure;
   52.30 +  static CLDToOopClosure      adjust_cld_closure;
   52.31  
   52.32    // Accessors
   52.33    static uint total_invocations() { return _total_invocations; }
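
With FollowKlassClosure and AdjustKlassClosure gone, mark-sweep now reaches classes through CLDToOopClosure instances: it visits a ClassLoaderData once and lets the CLD hand out its own oops and klasses, which is what a per-class-loader unloading decision needs. The following is a minimal standalone C++ model of that delegation and of the claim-once behaviour; the *Model names are stand-ins, not the HotSpot class hierarchy.

    #include <cstdio>
    #include <vector>

    struct OopClosureModel {
      virtual void do_oop(int* p) = 0;
      virtual ~OopClosureModel() {}
    };

    // The CLD owns the loader's oops (mirrors, the loader object itself), so
    // visiting the CLD once covers everything it keeps alive.
    struct ClassLoaderDataModel {
      bool claimed = false;
      std::vector<int> oops;

      bool claim() {                           // claim once per GC phase
        if (claimed) return false;
        claimed = true;
        return true;
      }
      void oops_do(OopClosureModel* cl, bool must_claim) {
        if (must_claim && !claim()) return;    // already visited in this phase
        for (int& o : oops) cl->do_oop(&o);
      }
    };

    struct CLDToOopClosureModel {
      OopClosureModel* _oop_closure;
      bool _must_claim_cld;
      void do_cld(ClassLoaderDataModel* cld) {
        cld->oops_do(_oop_closure, _must_claim_cld);
      }
    };

    struct MarkOops : OopClosureModel {
      void do_oop(int* p) override { std::printf("marking oop %d\n", *p); }
    };

    int main() {
      ClassLoaderDataModel cld;
      cld.oops = {7, 8};
      MarkOops marker;
      CLDToOopClosureModel follow{&marker, /*must_claim_cld=*/true};
      follow.do_cld(&cld);   // visits the loader's oops exactly once
      follow.do_cld(&cld);   // no-op: the CLD was already claimed this phase
      return 0;
    }
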
    53.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Jul 01 09:03:55 2014 +0200
    53.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon Jul 07 10:12:40 2014 +0200
    53.3 @@ -209,6 +209,43 @@
    53.4    gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
    53.5  }
    53.6  
    53.7 +bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
    53.8 +#if INCLUDE_ALL_GCS
    53.9 +  if (UseConcMarkSweepGC || UseG1GC) {
   53.10 +    if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
   53.11 +      MetaspaceGC::set_should_concurrent_collect(true);
   53.12 +    } else if (UseG1GC) {
   53.13 +      G1CollectedHeap* g1h = G1CollectedHeap::heap();
   53.14 +      g1h->g1_policy()->set_initiate_conc_mark_if_possible();
   53.15 +
   53.16 +      GCCauseSetter x(g1h, _gc_cause);
   53.17 +
   53.18 +      // At this point we are supposed to start a concurrent cycle. We
   53.19 +      // will do so if one is not already in progress.
   53.20 +      bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
   53.21 +
   53.22 +      if (should_start) {
   53.23 +        double pause_target = g1h->g1_policy()->max_pause_time_ms();
   53.24 +        g1h->do_collection_pause_at_safepoint(pause_target);
   53.25 +      }
   53.26 +    }
   53.27 +
   53.28 +    return true;
   53.29 +  }
   53.30 +#endif
   53.31 +  return false;
   53.32 +}
   53.33 +
   53.34 +static void log_metaspace_alloc_failure_for_concurrent_GC() {
   53.35 +  if (Verbose && PrintGCDetails) {
   53.36 +    if (UseConcMarkSweepGC) {
   53.37 +      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
   53.38 +    } else if (UseG1GC) {
   53.39 +      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
   53.40 +    }
   53.41 +  }
   53.42 +}
   53.43 +
   53.44  void VM_CollectForMetadataAllocation::doit() {
   53.45    SvcGCMarker sgcm(SvcGCMarker::FULL);
   53.46  
   53.47 @@ -220,54 +257,57 @@
   53.48    // a GC that freed space for the allocation.
   53.49    if (!MetadataAllocationFailALot) {
   53.50      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   53.51 -  }
   53.52 -
   53.53 -  if (_result == NULL) {
   53.54 -    if (UseConcMarkSweepGC) {
   53.55 -      if (CMSClassUnloadingEnabled) {
   53.56 -        MetaspaceGC::set_should_concurrent_collect(true);
   53.57 -      }
   53.58 -      // For CMS expand since the collection is going to be concurrent.
   53.59 -      _result =
   53.60 -        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
   53.61 -    }
   53.62 -    if (_result == NULL) {
   53.63 -      // Don't clear the soft refs yet.
   53.64 -      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
   53.65 -        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
   53.66 -      }
   53.67 -      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
   53.68 -      // After a GC try to allocate without expanding.  Could fail
   53.69 -      // and expansion will be tried below.
   53.70 -      _result =
   53.71 -        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   53.72 -    }
   53.73 -    if (_result == NULL) {
   53.74 -      // If still failing, allow the Metaspace to expand.
   53.75 -      // See delta_capacity_until_GC() for explanation of the
   53.76 -      // amount of the expansion.
   53.77 -      // This should work unless there really is no more space
   53.78 -      // or a MaxMetaspaceSize has been specified on the command line.
   53.79 -      _result =
   53.80 -        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
   53.81 -      if (_result == NULL) {
   53.82 -        // If expansion failed, do a last-ditch collection and try allocating
   53.83 -        // again.  A last-ditch collection will clear softrefs.  This
   53.84 -        // behavior is similar to the last-ditch collection done for perm
   53.85 -        // gen when it was full and a collection for failed allocation
   53.86 -        // did not free perm gen space.
   53.87 -        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
   53.88 -        _result =
   53.89 -          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   53.90 -      }
   53.91 -    }
   53.92 -    if (Verbose && PrintGCDetails && _result == NULL) {
   53.93 -      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
   53.94 -                             SIZE_FORMAT, _size);
   53.95 +    if (_result != NULL) {
   53.96 +      return;
   53.97      }
   53.98    }
   53.99  
  53.100 -  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
  53.101 +  if (initiate_concurrent_GC()) {
  53.102 +    // For CMS and G1 expand since the collection is going to be concurrent.
  53.103 +    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  53.104 +    if (_result != NULL) {
  53.105 +      return;
  53.106 +    }
  53.107 +
  53.108 +    log_metaspace_alloc_failure_for_concurrent_GC();
  53.109 +  }
  53.110 +
  53.111 +  // Don't clear the soft refs yet.
  53.112 +  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  53.113 +  // After a GC try to allocate without expanding.  Could fail
  53.114 +  // and expansion will be tried below.
  53.115 +  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  53.116 +  if (_result != NULL) {
  53.117 +    return;
  53.118 +  }
  53.119 +
  53.120 +  // If still failing, allow the Metaspace to expand.
  53.121 +  // See delta_capacity_until_GC() for explanation of the
  53.122 +  // amount of the expansion.
  53.123 +  // This should work unless there really is no more space
  53.124 +  // or a MaxMetaspaceSize has been specified on the command line.
  53.125 +  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  53.126 +  if (_result != NULL) {
  53.127 +    return;
  53.128 +  }
  53.129 +
  53.130 +  // If expansion failed, do a last-ditch collection and try allocating
  53.131 +  // again.  A last-ditch collection will clear softrefs.  This
  53.132 +  // behavior is similar to the last-ditch collection done for perm
  53.133 +  // gen when it was full and a collection for failed allocation
  53.134 +  // did not free perm gen space.
  53.135 +  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  53.136 +  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  53.137 +  if (_result != NULL) {
  53.138 +    return;
  53.139 +  }
  53.140 +
  53.141 +  if (Verbose && PrintGCDetails) {
  53.142 +    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
  53.143 +                           SIZE_FORMAT, _size);
  53.144 +  }
  53.145 +
  53.146 +  if (GC_locker::is_active_and_needs_gc()) {
  53.147      set_gc_locked();
  53.148    }
  53.149  }
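
The rewritten VM_CollectForMetadataAllocation::doit() flattens the old nested retry ladder into early returns and moves the CMS/G1 decision into initiate_concurrent_GC(). A standalone sketch of that control flow, with the same ordering of remedies, is shown below; HeapModel and its member functions are invented stand-ins, not HotSpot APIs.

    #include <cstdio>

    // Try each remedy in order and return as soon as one yields a block.
    struct HeapModel {
      int   budget;                                  // free "words" left
      void* allocate(int size)            { return size <= budget ? (budget -= size, &budget) : nullptr; }
      void* expand_and_allocate(int size) { budget += size; return allocate(size); }
      bool  start_concurrent_gc()         { return true; }   // stands in for initiate_concurrent_GC()
      void  full_gc()                     { budget += 4; }   // models a full GC freeing some space
      void  last_ditch_gc()               { budget += 8; }   // models the soft-ref-clearing last resort
    };

    void* satisfy_metadata_allocation(HeapModel& heap, int size) {
      if (void* p = heap.allocate(size)) return p;

      if (heap.start_concurrent_gc()) {
        // The collection is concurrent, so expand instead of waiting for it.
        if (void* p = heap.expand_and_allocate(size)) return p;
      }

      heap.full_gc();                                // threshold-triggered collection
      if (void* p = heap.allocate(size)) return p;   // retry without expanding

      if (void* p = heap.expand_and_allocate(size)) return p;

      heap.last_ditch_gc();
      return heap.allocate(size);                    // may still be NULL
    }

    int main() {
      HeapModel heap{2};
      void* p = satisfy_metadata_allocation(heap, 6);
      std::printf("allocation %s\n", p != nullptr ? "succeeded" : "failed");
      return 0;
    }
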
    54.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Jul 01 09:03:55 2014 +0200
    54.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Jul 07 10:12:40 2014 +0200
    54.3 @@ -217,6 +217,8 @@
    54.4    virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
    54.5    virtual void doit();
    54.6    MetaWord* result() const       { return _result; }
    54.7 +
    54.8 +  bool initiate_concurrent_GC();
    54.9  };
   54.10  
   54.11  class SvcGCMarker : public StackObj {
    55.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp	Tue Jul 01 09:03:55 2014 +0200
    55.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp	Mon Jul 07 10:12:40 2014 +0200
    55.3 @@ -429,7 +429,7 @@
    55.4                                                                   OopsInGenClosure* cl,
    55.5                                                                   CardTableRS* ct) {
    55.6    if (!mr.is_empty()) {
    55.7 -    // Caller (process_strong_roots()) claims that all GC threads
    55.8 +    // Caller (process_roots()) claims that all GC threads
    55.9      // execute this call.  With UseDynamicNumberOfGCThreads now all
   55.10      // active GC threads execute this call.  The number of active GC
   55.11      // threads needs to be passed to par_non_clean_card_iterate_work()
   55.12 @@ -438,7 +438,7 @@
   55.13      // This is an example of where n_par_threads() is used instead
   55.14      // of workers()->active_workers().  n_par_threads can be set to 0 to
   55.15      // turn off parallelism.  For example when this code is called as
   55.16 -    // part of verification and SharedHeap::process_strong_roots() is being
   55.17 +    // part of verification and SharedHeap::process_roots() is being
   55.18      // used, then n_par_threads() may have been set to 0.  active_workers
   55.19      // is not overloaded with the meaning that it is a switch to disable
   55.20      // parallelism and so keeps the meaning of the number of
    56.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Tue Jul 01 09:03:55 2014 +0200
    56.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Jul 07 10:12:40 2014 +0200
    56.3 @@ -613,6 +613,9 @@
    56.4  
    56.5    KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
    56.6                                        gch->rem_set()->klass_rem_set());
    56.7 +  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
    56.8 +                                           &fsc_with_no_gc_barrier,
    56.9 +                                           false);
   56.10  
   56.11    set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   56.12    FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
   56.13 @@ -622,16 +625,15 @@
   56.14    assert(gch->no_allocs_since_save_marks(0),
   56.15           "save marks have not been newly set.");
   56.16  
   56.17 -  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
   56.18 -
   56.19 -  gch->gen_process_strong_roots(_level,
   56.20 -                                true,  // Process younger gens, if any,
   56.21 -                                       // as strong roots.
   56.22 -                                true,  // activate StrongRootsScope
   56.23 -                                SharedHeap::ScanningOption(so),
   56.24 -                                &fsc_with_no_gc_barrier,
   56.25 -                                &fsc_with_gc_barrier,
   56.26 -                                &klass_scan_closure);
   56.27 +  gch->gen_process_roots(_level,
   56.28 +                         true,  // Process younger gens, if any,
   56.29 +                                // as strong roots.
   56.30 +                         true,  // activate StrongRootsScope
   56.31 +                         SharedHeap::SO_ScavengeCodeCache,
   56.32 +                         GenCollectedHeap::StrongAndWeakRoots,
   56.33 +                         &fsc_with_no_gc_barrier,
   56.34 +                         &fsc_with_gc_barrier,
   56.35 +                         &cld_scan_closure);
   56.36  
   56.37    // "evacuate followers".
   56.38    evacuate_followers.do_void();
    57.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Jul 01 09:03:55 2014 +0200
    57.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Jul 07 10:12:40 2014 +0200
    57.3 @@ -61,8 +61,8 @@
    57.4  GenCollectedHeap* GenCollectedHeap::_gch;
    57.5  NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
    57.6  
    57.7 -// The set of potentially parallel tasks in strong root scanning.
    57.8 -enum GCH_process_strong_roots_tasks {
    57.9 +// The set of potentially parallel tasks in root scanning.
   57.10 +enum GCH_strong_roots_tasks {
   57.11    // We probably want to parallelize both of these internally, but for now...
   57.12    GCH_PS_younger_gens,
   57.13    // Leave this one last.
   57.14 @@ -72,11 +72,11 @@
   57.15  GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   57.16    SharedHeap(policy),
   57.17    _gen_policy(policy),
   57.18 -  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
   57.19 +  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
   57.20    _full_collections_completed(0)
   57.21  {
   57.22 -  if (_gen_process_strong_tasks == NULL ||
   57.23 -      !_gen_process_strong_tasks->valid()) {
   57.24 +  if (_gen_process_roots_tasks == NULL ||
   57.25 +      !_gen_process_roots_tasks->valid()) {
   57.26      vm_exit_during_initialization("Failed necessary allocation.");
   57.27    }
   57.28    assert(policy != NULL, "Sanity check");
   57.29 @@ -590,24 +590,29 @@
   57.30  
   57.31  void GenCollectedHeap::set_par_threads(uint t) {
   57.32    SharedHeap::set_par_threads(t);
   57.33 -  _gen_process_strong_tasks->set_n_threads(t);
   57.34 +  _gen_process_roots_tasks->set_n_threads(t);
   57.35  }
   57.36  
   57.37  void GenCollectedHeap::
   57.38 -gen_process_strong_roots(int level,
   57.39 -                         bool younger_gens_as_roots,
   57.40 -                         bool activate_scope,
   57.41 -                         SharedHeap::ScanningOption so,
   57.42 -                         OopsInGenClosure* not_older_gens,
   57.43 -                         OopsInGenClosure* older_gens,
   57.44 -                         KlassClosure* klass_closure) {
   57.45 -  // General strong roots.
   57.46 +gen_process_roots(int level,
   57.47 +                  bool younger_gens_as_roots,
   57.48 +                  bool activate_scope,
   57.49 +                  SharedHeap::ScanningOption so,
   57.50 +                  OopsInGenClosure* not_older_gens,
   57.51 +                  OopsInGenClosure* weak_roots,
   57.52 +                  OopsInGenClosure* older_gens,
   57.53 +                  CLDClosure* cld_closure,
   57.54 +                  CLDClosure* weak_cld_closure,
   57.55 +                  CodeBlobClosure* code_closure) {
   57.56  
   57.57 -  SharedHeap::process_strong_roots(activate_scope, so,
   57.58 -                                   not_older_gens, klass_closure);
   57.59 +  // General roots.
   57.60 +  SharedHeap::process_roots(activate_scope, so,
   57.61 +                            not_older_gens, weak_roots,
   57.62 +                            cld_closure, weak_cld_closure,
   57.63 +                            code_closure);
   57.64  
   57.65    if (younger_gens_as_roots) {
   57.66 -    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
   57.67 +    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
   57.68        for (int i = 0; i < level; i++) {
   57.69          not_older_gens->set_generation(_gens[i]);
   57.70          _gens[i]->oop_iterate(not_older_gens);
   57.71 @@ -623,7 +628,38 @@
   57.72      older_gens->reset_generation();
   57.73    }
   57.74  
   57.75 -  _gen_process_strong_tasks->all_tasks_completed();
   57.76 +  _gen_process_roots_tasks->all_tasks_completed();
   57.77 +}
   57.78 +
   57.79 +void GenCollectedHeap::
   57.80 +gen_process_roots(int level,
   57.81 +                  bool younger_gens_as_roots,
   57.82 +                  bool activate_scope,
   57.83 +                  SharedHeap::ScanningOption so,
   57.84 +                  bool only_strong_roots,
   57.85 +                  OopsInGenClosure* not_older_gens,
   57.86 +                  OopsInGenClosure* older_gens,
   57.87 +                  CLDClosure* cld_closure) {
   57.88 +
   57.89 +  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
   57.90 +
   57.91 +  bool is_moving_collection = false;
   57.92 +  if (level == 0 || is_adjust_phase) {
   57.93 +    // young collections are always moving
   57.94 +    is_moving_collection = true;
   57.95 +  }
   57.96 +
   57.97 +  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
   57.98 +  CodeBlobClosure* code_closure = &mark_code_closure;
   57.99 +
  57.100 +  gen_process_roots(level,
  57.101 +                    younger_gens_as_roots,
  57.102 +                    activate_scope, so,
  57.103 +                    not_older_gens, only_strong_roots ? NULL : not_older_gens,
  57.104 +                    older_gens,
  57.105 +                    cld_closure, only_strong_roots ? NULL : cld_closure,
  57.106 +                    code_closure);
  57.107 +
  57.108  }
  57.109  
  57.110  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
    58.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Jul 01 09:03:55 2014 +0200
    58.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Mon Jul 07 10:12:40 2014 +0200
    58.3 @@ -78,9 +78,9 @@
    58.4    unsigned int _full_collections_completed;
    58.5  
    58.6    // Data structure for claiming the (potentially) parallel tasks in
    58.7 -  // (gen-specific) strong roots processing.
    58.8 -  SubTasksDone* _gen_process_strong_tasks;
    58.9 -  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
   58.10 +  // (gen-specific) roots processing.
   58.11 +  SubTasksDone* _gen_process_roots_tasks;
   58.12 +  SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
   58.13  
   58.14    // In block contents verification, the number of header words to skip
   58.15    NOT_PRODUCT(static size_t _skip_header_HeapWords;)
   58.16 @@ -411,18 +411,30 @@
   58.17    // The "so" argument determines which of the roots
   58.18    // the closure is applied to:
   58.19    // "SO_None" does none;
   58.20 -  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   58.21 -  // "SO_SystemClasses" to all the "system" classes and loaders;
   58.22 -  // "SO_Strings" applies the closure to all entries in the StringTable.
   58.23 -  void gen_process_strong_roots(int level,
   58.24 -                                bool younger_gens_as_roots,
   58.25 -                                // The remaining arguments are in an order
   58.26 -                                // consistent with SharedHeap::process_strong_roots:
   58.27 -                                bool activate_scope,
   58.28 -                                SharedHeap::ScanningOption so,
   58.29 -                                OopsInGenClosure* not_older_gens,
   58.30 -                                OopsInGenClosure* older_gens,
   58.31 -                                KlassClosure* klass_closure);
   58.32 + private:
   58.33 +  void gen_process_roots(int level,
   58.34 +                         bool younger_gens_as_roots,
   58.35 +                         bool activate_scope,
   58.36 +                         SharedHeap::ScanningOption so,
   58.37 +                         OopsInGenClosure* not_older_gens,
   58.38 +                         OopsInGenClosure* weak_roots,
   58.39 +                         OopsInGenClosure* older_gens,
   58.40 +                         CLDClosure* cld_closure,
   58.41 +                         CLDClosure* weak_cld_closure,
   58.42 +                         CodeBlobClosure* code_closure);
   58.43 +
   58.44 + public:
   58.45 +  static const bool StrongAndWeakRoots = false;
   58.46 +  static const bool StrongRootsOnly    = true;
   58.47 +
   58.48 +  void gen_process_roots(int level,
   58.49 +                         bool younger_gens_as_roots,
   58.50 +                         bool activate_scope,
   58.51 +                         SharedHeap::ScanningOption so,
   58.52 +                         bool only_strong_roots,
   58.53 +                         OopsInGenClosure* not_older_gens,
   58.54 +                         OopsInGenClosure* older_gens,
   58.55 +                         CLDClosure* cld_closure);
   58.56  
   58.57    // Apply "root_closure" to all the weak roots of the system.
   58.58    // These include JNI weak roots, string table,
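
The new public gen_process_roots() overload expands the only_strong_roots flag into the strong/weak closure pairs and builds the code-blob closure itself, as the genCollectedHeap.cpp hunk above shows. A standalone sketch of that mapping follows; the types and the printout are illustrative stand-ins, not HotSpot classes.

    #include <cstdio>

    struct OopClosureModel { const char* name; };
    struct CLDClosureModel { const char* name; };

    static const bool StrongAndWeakRoots = false;
    static const bool StrongRootsOnly    = true;

    void gen_process_roots_model(int level,
                                 bool younger_gens_as_roots,
                                 bool only_strong_roots,
                                 OopClosureModel* not_older_gens,
                                 CLDClosureModel* cld_closure) {
      const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
      // Young collections and the full-GC adjust phase move objects, so the
      // code-blob walk has to fix the oop relocations it visits.
      const bool is_moving_collection = (level == 0) || is_adjust_phase;

      OopClosureModel* weak_roots       = only_strong_roots ? nullptr : not_older_gens;
      CLDClosureModel* weak_cld_closure = only_strong_roots ? nullptr : cld_closure;

      std::printf("weak oop closure: %s, weak CLD closure: %s, fix relocations: %s\n",
                  weak_roots ? weak_roots->name : "none",
                  weak_cld_closure ? weak_cld_closure->name : "none",
                  is_moving_collection ? "yes" : "no");
    }

    int main() {
      OopClosureModel scan{"scan-roots"};
      CLDClosureModel clds{"scan-clds"};
      gen_process_roots_model(0, true,  StrongAndWeakRoots, &scan, &clds);  // young GC
      gen_process_roots_model(1, false, StrongRootsOnly,    &scan, &clds);  // mark phase
      return 0;
    }
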
    59.1 --- a/src/share/vm/memory/genMarkSweep.cpp	Tue Jul 01 09:03:55 2014 +0200
    59.2 +++ b/src/share/vm/memory/genMarkSweep.cpp	Mon Jul 07 10:12:40 2014 +0200
    59.3 @@ -207,13 +207,14 @@
    59.4    // Need new claim bits before marking starts.
    59.5    ClassLoaderDataGraph::clear_claimed_marks();
    59.6  
    59.7 -  gch->gen_process_strong_roots(level,
    59.8 -                                false, // Younger gens are not roots.
    59.9 -                                true,  // activate StrongRootsScope
   59.10 -                                SharedHeap::SO_SystemClasses,
   59.11 -                                &follow_root_closure,
   59.12 -                                &follow_root_closure,
   59.13 -                                &follow_klass_closure);
   59.14 +  gch->gen_process_roots(level,
   59.15 +                         false, // Younger gens are not roots.
   59.16 +                         true,  // activate StrongRootsScope
   59.17 +                         SharedHeap::SO_None,
   59.18 +                         GenCollectedHeap::StrongRootsOnly,
   59.19 +                         &follow_root_closure,
   59.20 +                         &follow_root_closure,
   59.21 +                         &follow_cld_closure);
   59.22  
   59.23    // Process reference objects found during marking
   59.24    {
   59.25 @@ -291,13 +292,14 @@
   59.26    // are run.
   59.27    adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
   59.28  
   59.29 -  gch->gen_process_strong_roots(level,
   59.30 -                                false, // Younger gens are not roots.
   59.31 -                                true,  // activate StrongRootsScope
   59.32 -                                SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
   59.33 -                                &adjust_pointer_closure,
   59.34 -                                &adjust_pointer_closure,
   59.35 -                                &adjust_klass_closure);
   59.36 +  gch->gen_process_roots(level,
   59.37 +                         false, // Younger gens are not roots.
   59.38 +                         true,  // activate StrongRootsScope
   59.39 +                         SharedHeap::SO_AllCodeCache,
   59.40 +                         GenCollectedHeap::StrongAndWeakRoots,
   59.41 +                         &adjust_pointer_closure,
   59.42 +                         &adjust_pointer_closure,
   59.43 +                         &adjust_cld_closure);
   59.44  
   59.45    gch->gen_process_weak_roots(&adjust_pointer_closure);
   59.46  
    60.1 --- a/src/share/vm/memory/iterator.cpp	Tue Jul 01 09:03:55 2014 +0200
    60.2 +++ b/src/share/vm/memory/iterator.cpp	Mon Jul 07 10:12:40 2014 +0200
    60.3 @@ -35,6 +35,10 @@
    60.4    cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
    60.5  }
    60.6  
    60.7 +void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
    60.8 +  cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
    60.9 +}
   60.10 +
   60.11  void ObjectToOopClosure::do_object(oop obj) {
   60.12    obj->oop_iterate(_cl);
   60.13  }
   60.14 @@ -43,6 +47,20 @@
   60.15    ShouldNotCallThis();
   60.16  }
   60.17  
   60.18 +void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
   60.19 +  nm->oops_do(_cl);
   60.20 +  if (_fix_relocations) {
   60.21 +    nm->fix_oop_relocations();
   60.22 +  }
   60.23 +}
   60.24 +
   60.25 +void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   60.26 +  nmethod* nm = cb->as_nmethod_or_null();
   60.27 +  if (nm != NULL) {
   60.28 +    do_nmethod(nm);
   60.29 +  }
   60.30 +}
   60.31 +
   60.32  MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
   60.33    : _active(activate)
   60.34  {
   60.35 @@ -55,32 +73,7 @@
   60.36  
   60.37  void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   60.38    nmethod* nm = cb->as_nmethod_or_null();
   60.39 -  if (nm == NULL)  return;
   60.40 -  if (!nm->test_set_oops_do_mark()) {
   60.41 -    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
   60.42 -    do_newly_marked_nmethod(nm);
   60.43 -  } else {
   60.44 -    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
   60.45 +  if (nm != NULL && !nm->test_set_oops_do_mark()) {
   60.46 +    do_nmethod(nm);
   60.47    }
   60.48  }
   60.49 -
   60.50 -void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
   60.51 -  nm->oops_do(_cl, /*allow_zombie=*/ false);
   60.52 -}
   60.53 -
   60.54 -void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   60.55 -  if (!_do_marking) {
   60.56 -    nmethod* nm = cb->as_nmethod_or_null();
   60.57 -    NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL)  nm->print_on(tty, "oops_do, unmarked visit\n"));
   60.58 -    // This assert won't work, since there are lots of mini-passes
   60.59 -    // (mostly in debug mode) that co-exist with marking phases.
   60.60 -    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
   60.61 -    if (nm != NULL) {
   60.62 -      nm->oops_do(_cl);
   60.63 -    }
   60.64 -  } else {
   60.65 -    MarkingCodeBlobClosure::do_code_blob(cb);
   60.66 -  }
   60.67 -}
   60.68 -
   60.69 -
    61.1 --- a/src/share/vm/memory/iterator.hpp	Tue Jul 01 09:03:55 2014 +0200
    61.2 +++ b/src/share/vm/memory/iterator.hpp	Mon Jul 07 10:12:40 2014 +0200
    61.3 @@ -84,8 +84,8 @@
    61.4    //
    61.5    // Providing default implementations of the _nv functions unfortunately
    61.6    // removes the compile-time safeness, but reduces the clutter for the
    61.7 -  // ExtendedOopClosures that don't need to walk the metadata. Currently,
    61.8 -  // only CMS needs these.
    61.9 +  // ExtendedOopClosures that don't need to walk the metadata.
   61.10 +  // Currently, only CMS and G1 need these.
   61.11  
   61.12    virtual bool do_metadata() { return do_metadata_nv(); }
   61.13    bool do_metadata_v()       { return do_metadata(); }
   61.14 @@ -145,15 +145,16 @@
   61.15      _oop_closure = oop_closure;
   61.16    }
   61.17  
   61.18 -public:
   61.19 + public:
   61.20    KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
   61.21 +
   61.22    virtual void do_klass(Klass* k);
   61.23  };
   61.24  
   61.25  class CLDToOopClosure : public CLDClosure {
   61.26 -  OopClosure* _oop_closure;
   61.27 +  OopClosure*       _oop_closure;
   61.28    KlassToOopClosure _klass_closure;
   61.29 -  bool _must_claim_cld;
   61.30 +  bool              _must_claim_cld;
   61.31  
   61.32   public:
   61.33    CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
   61.34 @@ -164,6 +165,23 @@
   61.35    void do_cld(ClassLoaderData* cld);
   61.36  };
   61.37  
   61.38 +class CLDToKlassAndOopClosure : public CLDClosure {
   61.39 +  friend class SharedHeap;
   61.40 +  friend class G1CollectedHeap;
   61.41 + protected:
   61.42 +  OopClosure*   _oop_closure;
   61.43 +  KlassClosure* _klass_closure;
   61.44 +  bool          _must_claim_cld;
   61.45 + public:
   61.46 +  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
   61.47 +                          OopClosure* oop_closure,
   61.48 +                          bool must_claim_cld) :
   61.49 +                              _oop_closure(oop_closure),
   61.50 +                              _klass_closure(klass_closure),
   61.51 +                              _must_claim_cld(must_claim_cld) {}
   61.52 +  void do_cld(ClassLoaderData* cld);
   61.53 +};
   61.54 +
   61.55  // The base class for all concurrent marking closures,
   61.56  // that participates in class unloading.
   61.57  // It's used to proxy through the metadata to the oops defined in them.
   61.58 @@ -265,14 +283,26 @@
   61.59    virtual void do_code_blob(CodeBlob* cb) = 0;
   61.60  };
   61.61  
   61.62 +// Applies an oop closure to all ref fields in code blobs
   61.63 +// iterated over in an object iteration.
   61.64 +class CodeBlobToOopClosure : public CodeBlobClosure {
   61.65 +  OopClosure* _cl;
   61.66 +  bool _fix_relocations;
   61.67 + protected:
   61.68 +  void do_nmethod(nmethod* nm);
   61.69 + public:
   61.70 +  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
   61.71 +  virtual void do_code_blob(CodeBlob* cb);
   61.72  
   61.73 -class MarkingCodeBlobClosure : public CodeBlobClosure {
   61.74 +  const static bool FixRelocations = true;
   61.75 +};
   61.76 +
   61.77 +class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
   61.78   public:
   61.79 +  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
   61.80    // Called for each code blob, but at most once per unique blob.
   61.81 -  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
   61.82  
   61.83    virtual void do_code_blob(CodeBlob* cb);
   61.84 -    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
   61.85  
   61.86    class MarkScope : public StackObj {
   61.87    protected:
   61.88 @@ -285,23 +315,6 @@
   61.89    };
   61.90  };
   61.91  
   61.92 -
   61.93 -// Applies an oop closure to all ref fields in code blobs
   61.94 -// iterated over in an object iteration.
   61.95 -class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
   61.96 -  OopClosure* _cl;
   61.97 -  bool _do_marking;
   61.98 -public:
   61.99 -  virtual void do_newly_marked_nmethod(nmethod* cb);
  61.100 -    // = { cb->oops_do(_cl); }
  61.101 -  virtual void do_code_blob(CodeBlob* cb);
  61.102 -    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  61.103 -  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
  61.104 -    : _cl(cl), _do_marking(do_marking) {}
  61.105 -};
  61.106 -
  61.107 -
  61.108 -
  61.109  // MonitorClosure is used for iterating over monitors in the monitors cache
  61.110  
  61.111  class ObjectMonitor;
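
The closure hierarchy is inverted here: CodeBlobToOopClosure becomes the base that applies an oop closure to every nmethod (optionally fixing oop relocations), and MarkingCodeBlobClosure derives from it, visiting each nmethod at most once per cycle via test_set_oops_do_mark(). A self-contained C++ model of that split follows; all names in it are illustrative, not the HotSpot types.

    #include <atomic>
    #include <cstdio>
    #include <vector>

    struct NMethodModel {
      std::vector<int> embedded_oops;
      std::atomic<bool> oops_do_marked{false};

      bool test_set_oops_do_mark() {          // true if already visited this cycle
        return oops_do_marked.exchange(true);
      }
    };

    struct OopVisitor {
      virtual void do_oop(int* p) = 0;
      virtual ~OopVisitor() {}
    };

    // Base closure: visit every nmethod's embedded oops on every call.
    struct CodeBlobVisitor {
      OopVisitor* _cl;
      bool _fix_relocations;
      static constexpr bool FixRelocations = true;

      CodeBlobVisitor(OopVisitor* cl, bool fix) : _cl(cl), _fix_relocations(fix) {}
      void do_nmethod(NMethodModel* nm) {
        for (int& o : nm->embedded_oops) _cl->do_oop(&o);
        if (_fix_relocations) {
          // A moving collector would patch the oop relocation entries here.
        }
      }
      virtual void do_code_blob(NMethodModel* nm) { do_nmethod(nm); }
      virtual ~CodeBlobVisitor() {}
    };

    // Marking variant: skip nmethods already visited in the current cycle.
    struct MarkingCodeBlobVisitor : CodeBlobVisitor {
      using CodeBlobVisitor::CodeBlobVisitor;
      void do_code_blob(NMethodModel* nm) override {
        if (!nm->test_set_oops_do_mark()) {
          do_nmethod(nm);
        }
      }
    };

    struct PrintOop : OopVisitor {
      void do_oop(int* p) override { std::printf("visit oop %d\n", *p); }
    };

    int main() {
      NMethodModel nm;
      nm.embedded_oops = {42};
      PrintOop printer;
      MarkingCodeBlobVisitor marking(&printer, !CodeBlobVisitor::FixRelocations);
      marking.do_code_blob(&nm);   // visits the oop
      marking.do_code_blob(&nm);   // skipped: already marked this cycle
      return 0;
    }
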
    62.1 --- a/src/share/vm/memory/metadataFactory.hpp	Tue Jul 01 09:03:55 2014 +0200
    62.2 +++ b/src/share/vm/memory/metadataFactory.hpp	Mon Jul 07 10:12:40 2014 +0200
    62.3 @@ -25,6 +25,7 @@
    62.4  #ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP
    62.5  #define SHARE_VM_MEMORY_METADATAFACTORY_HPP
    62.6  
    62.7 +#include "classfile/classLoaderData.hpp"
    62.8  #include "utilities/array.hpp"
    62.9  #include "utilities/exceptions.hpp"
   62.10  #include "utilities/globalDefinitions.hpp"
    63.1 --- a/src/share/vm/memory/sharedHeap.cpp	Tue Jul 01 09:03:55 2014 +0200
    63.2 +++ b/src/share/vm/memory/sharedHeap.cpp	Mon Jul 07 10:12:40 2014 +0200
    63.3 @@ -29,6 +29,7 @@
    63.4  #include "gc_interface/collectedHeap.inline.hpp"
    63.5  #include "memory/sharedHeap.hpp"
    63.6  #include "oops/oop.inline.hpp"
    63.7 +#include "runtime/atomic.inline.hpp"
    63.8  #include "runtime/fprofiler.hpp"
    63.9  #include "runtime/java.hpp"
   63.10  #include "services/management.hpp"
   63.11 @@ -39,8 +40,8 @@
   63.12  
   63.13  SharedHeap* SharedHeap::_sh;
   63.14  
   63.15 -// The set of potentially parallel tasks in strong root scanning.
   63.16 -enum SH_process_strong_roots_tasks {
   63.17 +// The set of potentially parallel tasks in root scanning.
   63.18 +enum SH_process_roots_tasks {
   63.19    SH_PS_Universe_oops_do,
   63.20    SH_PS_JNIHandles_oops_do,
   63.21    SH_PS_ObjectSynchronizer_oops_do,
   63.22 @@ -58,6 +59,7 @@
   63.23    CollectedHeap(),
   63.24    _collector_policy(policy_),
   63.25    _rem_set(NULL),
   63.26 +  _strong_roots_scope(NULL),
   63.27    _strong_roots_parity(0),
   63.28    _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
   63.29    _workers(NULL)
   63.30 @@ -114,6 +116,19 @@
   63.31  static AssertNonScavengableClosure assert_is_non_scavengable_closure;
   63.32  #endif
   63.33  
   63.34 +SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
   63.35 +  return _strong_roots_scope;
   63.36 +}
   63.37 +void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
   63.38 +  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
   63.39 +  assert(scope != NULL, "Illegal argument");
   63.40 +  _strong_roots_scope = scope;
   63.41 +}
   63.42 +void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
   63.43 +  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
   63.44 +  _strong_roots_scope = NULL;
   63.45 +}
   63.46 +
   63.47  void SharedHeap::change_strong_roots_parity() {
   63.48    // Also set the new collection parity.
   63.49    assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
   63.50 @@ -124,111 +139,160 @@
   63.51           "Not in range.");
   63.52  }
   63.53  
   63.54 -SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
   63.55 -  : MarkScope(activate)
   63.56 +SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
   63.57 +  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
   63.58  {
   63.59    if (_active) {
   63.60 -    outer->change_strong_roots_parity();
   63.61 +    _sh->register_strong_roots_scope(this);
   63.62 +    _sh->change_strong_roots_parity();
   63.63      // Zero the claimed high water mark in the StringTable
   63.64      StringTable::clear_parallel_claimed_index();
   63.65    }
   63.66  }
   63.67  
   63.68  SharedHeap::StrongRootsScope::~StrongRootsScope() {
   63.69 -  // nothing particular
   63.70 +  if (_active) {
   63.71 +    _sh->unregister_strong_roots_scope(this);
   63.72 +  }
   63.73 +}
   63.74 +
   63.75 +Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
   63.76 +
   63.77 +void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
   63.78 +  // The Thread work barrier is only needed by G1.
   63.79 +  // No need to use the barrier if this is single-threaded code.
   63.80 +  if (UseG1GC && n_workers > 0) {
   63.81 +    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
   63.82 +    if (new_value == n_workers) {
   63.83 +      // This thread is last. Notify the others.
   63.84 +      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
   63.85 +      _lock->notify_all();
   63.86 +    }
   63.87 +  }
   63.88 +}
   63.89 +
   63.90 +void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
   63.91 +  // No need to use the barrier if this is single-threaded code.
   63.92 +  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
   63.93 +    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
   63.94 +    while ((uint)_n_workers_done_with_threads != n_workers) {
   63.95 +      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
   63.96 +    }
   63.97 +  }
   63.98 +}
   63.99 +
  63.100 +void SharedHeap::process_roots(bool activate_scope,
  63.101 +                               ScanningOption so,
  63.102 +                               OopClosure* strong_roots,
  63.103 +                               OopClosure* weak_roots,
  63.104 +                               CLDClosure* strong_cld_closure,
  63.105 +                               CLDClosure* weak_cld_closure,
  63.106 +                               CodeBlobClosure* code_roots) {
  63.107 +  StrongRootsScope srs(this, activate_scope);
  63.108 +
  63.109 +  // General roots.
  63.110 +  assert(_strong_roots_parity != 0, "must have called prologue code");
  63.111 +  assert(code_roots != NULL, "code root closure should always be set");
  63.112 +  // _n_termination for _process_strong_tasks should be set up stream
  63.113 +  // in a method not running in a GC worker.  Otherwise the GC worker
  63.114 +  // could be trying to change the termination condition while the task
  63.115 +  // is executing in another GC worker.
  63.116 +
  63.117 +  // Iterating over the CLDG and the Threads are done early to allow G1 to
  63.118 +  // first process the strong CLDs and nmethods and then, after a barrier,
  63.119 +  // let the thread process the weak CLDs and nmethods.
  63.120 +
  63.121 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
  63.122 +    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  63.123 +  }
  63.124 +
  63.125 +  // Some CLDs contained in the thread frames should be considered strong.
  63.126 +  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  63.127 +  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  63.128 +  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  63.129 +  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
  63.130 +
  63.131 +  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
  63.132 +
  63.133 +  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  63.134 +  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  63.135 +  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
  63.136 +
  63.137 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
  63.138 +    Universe::oops_do(strong_roots);
  63.139 +  }
  63.140 +  // Global (strong) JNI handles
  63.141 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
  63.142 +    JNIHandles::oops_do(strong_roots);
  63.143 +
  63.144 +  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
  63.145 +    ObjectSynchronizer::oops_do(strong_roots);
  63.146 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
  63.147 +    FlatProfiler::oops_do(strong_roots);
  63.148 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
  63.149 +    Management::oops_do(strong_roots);
  63.150 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
  63.151 +    JvmtiExport::oops_do(strong_roots);
  63.152 +
  63.153 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
  63.154 +    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  63.155 +  }
  63.156 +
  63.157 +  // All threads execute the following. A specific chunk of buckets
  63.158 +  // from the StringTable are the individual tasks.
  63.159 +  if (weak_roots != NULL) {
  63.160 +    if (CollectedHeap::use_parallel_gc_threads()) {
  63.161 +      StringTable::possibly_parallel_oops_do(weak_roots);
  63.162 +    } else {
  63.163 +      StringTable::oops_do(weak_roots);
  63.164 +    }
  63.165 +  }
  63.166 +
  63.167 +  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
  63.168 +    if (so & SO_ScavengeCodeCache) {
  63.169 +      assert(code_roots != NULL, "must supply closure for code cache");
  63.170 +
  63.171 +      // We only visit parts of the CodeCache when scavenging.
  63.172 +      CodeCache::scavenge_root_nmethods_do(code_roots);
  63.173 +    }
  63.174 +    if (so & SO_AllCodeCache) {
  63.175 +      assert(code_roots != NULL, "must supply closure for code cache");
  63.176 +
  63.177 +      // CMSCollector uses this to do intermediate-strength collections.
  63.178 +      // We scan the entire code cache, since CodeCache::do_unloading is not called.
  63.179 +      CodeCache::blobs_do(code_roots);
  63.180 +    }
  63.181 +    // Verify that the code cache contents are not subject to
  63.182 +    // movement by a scavenging collection.
  63.183 +    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  63.184 +    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  63.185 +  }
  63.186 +
  63.187 +  _process_strong_tasks->all_tasks_completed();
  63.188 +}
  63.189 +
  63.190 +void SharedHeap::process_all_roots(bool activate_scope,
  63.191 +                                   ScanningOption so,
  63.192 +                                   OopClosure* roots,
  63.193 +                                   CLDClosure* cld_closure,
  63.194 +                                   CodeBlobClosure* code_closure) {
  63.195 +  process_roots(activate_scope, so,
  63.196 +                roots, roots,
  63.197 +                cld_closure, cld_closure,
  63.198 +                code_closure);
  63.199  }
  63.200  
  63.201  void SharedHeap::process_strong_roots(bool activate_scope,
  63.202                                        ScanningOption so,
  63.203                                        OopClosure* roots,
  63.204 -                                      KlassClosure* klass_closure) {
  63.205 -  StrongRootsScope srs(this, activate_scope);
  63.206 +                                      CLDClosure* cld_closure,
  63.207 +                                      CodeBlobClosure* code_closure) {
  63.208 +  process_roots(activate_scope, so,
  63.209 +                roots, NULL,
  63.210 +                cld_closure, NULL,
  63.211 +                code_closure);
  63.212 +}
  63.213  
  63.214 -  // General strong roots.
  63.215 -  assert(_strong_roots_parity != 0, "must have called prologue code");
  63.216 -  // _n_termination for _process_strong_tasks should be set up stream
  63.217 -  // in a method not running in a GC worker.  Otherwise the GC worker
  63.218 -  // could be trying to change the termination condition while the task
  63.219 -  // is executing in another GC worker.
  63.220 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
  63.221 -    Universe::oops_do(roots);
  63.222 -  }
  63.223 -  // Global (strong) JNI handles
  63.224 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
  63.225 -    JNIHandles::oops_do(roots);
  63.226 -
  63.227 -  CodeBlobToOopClosure code_roots(roots, true);
  63.228 -
  63.229 -  CLDToOopClosure roots_from_clds(roots);
  63.230 -  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
  63.231 -  // CLDs which are strongly reachable from the thread stacks.
  63.232 -  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
  63.233 -  // All threads execute this; the individual threads are task groups.
  63.234 -  if (CollectedHeap::use_parallel_gc_threads()) {
  63.235 -    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
  63.236 -  } else {
  63.237 -    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
  63.238 -  }
  63.239 -
  63.240 -  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
  63.241 -    ObjectSynchronizer::oops_do(roots);
  63.242 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
  63.243 -    FlatProfiler::oops_do(roots);
  63.244 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
  63.245 -    Management::oops_do(roots);
  63.246 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
  63.247 -    JvmtiExport::oops_do(roots);
  63.248 -
  63.249 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
  63.250 -    if (so & SO_AllClasses) {
  63.251 -      SystemDictionary::oops_do(roots);
  63.252 -    } else if (so & SO_SystemClasses) {
  63.253 -      SystemDictionary::always_strong_oops_do(roots);
  63.254 -    } else {
  63.255 -      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
  63.256 -    }
  63.257 -  }
  63.258 -
  63.259 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
  63.260 -    if (so & SO_AllClasses) {
  63.261 -      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
  63.262 -    } else if (so & SO_SystemClasses) {
  63.263 -      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
  63.264 -    }
  63.265 -  }
  63.266 -
  63.267 -  // All threads execute the following. A specific chunk of buckets
  63.268 -  // from the StringTable are the individual tasks.
  63.269 -  if (so & SO_Strings) {
  63.270 -    if (CollectedHeap::use_parallel_gc_threads()) {
  63.271 -      StringTable::possibly_parallel_oops_do(roots);
  63.272 -    } else {
  63.273 -      StringTable::oops_do(roots);
  63.274 -    }
  63.275 -  }
  63.276 -
  63.277 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
  63.278 -    if (so & SO_ScavengeCodeCache) {
  63.279 -      assert(&code_roots != NULL, "must supply closure for code cache");
  63.280 -
  63.281 -      // We only visit parts of the CodeCache when scavenging.
  63.282 -      CodeCache::scavenge_root_nmethods_do(&code_roots);
  63.283 -    }
  63.284 -    if (so & SO_AllCodeCache) {
  63.285 -      assert(&code_roots != NULL, "must supply closure for code cache");
  63.286 -
  63.287 -      // CMSCollector uses this to do intermediate-strength collections.
  63.288 -      // We scan the entire code cache, since CodeCache::do_unloading is not called.
  63.289 -      CodeCache::blobs_do(&code_roots);
  63.290 -    }
  63.291 -    // Verify that the code cache contents are not subject to
  63.292 -    // movement by a scavenging collection.
  63.293 -    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
  63.294 -    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  63.295 -  }
  63.296 -
  63.297 -  _process_strong_tasks->all_tasks_completed();
  63.298 -}
  63.299  
  63.300  class AlwaysTrueClosure: public BoolObjectClosure {
  63.301  public:
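
The patch splits root processing into process_roots() (strong and weak closures supplied separately), process_all_roots() (every root treated as strong) and process_strong_roots() (weak closures passed as NULL). Below is a minimal sketch of how a collector might choose between the two convenience entry points; the closure types and the wrapper function are hypothetical, only the SharedHeap calls and ScanningOption values come from this patch:

  // Hypothetical caller; MyOopClosure/MyCLDClosure/MyCodeBlobClosure stand in
  // for collector-specific closures derived from OopClosure, CLDClosure and
  // CodeBlobClosure.
  void my_scan_roots(SharedHeap* sh, bool unloading_classes,
                     MyOopClosure* oops, MyCLDClosure* clds,
                     MyCodeBlobClosure* blobs) {
    if (unloading_classes) {
      // Weak CLDs and weak oops are left for the unloading pass to deal with.
      sh->process_strong_roots(true /* activate_scope */,
                               SharedHeap::SO_None,
                               oops, clds, blobs);
    } else {
      // Nothing will be unloaded, so every root can be treated as strong.
      sh->process_all_roots(true /* activate_scope */,
                            SharedHeap::SO_AllCodeCache,
                            oops, clds, blobs);
    }
  }
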
    64.1 --- a/src/share/vm/memory/sharedHeap.hpp	Tue Jul 01 09:03:55 2014 +0200
    64.2 +++ b/src/share/vm/memory/sharedHeap.hpp	Mon Jul 07 10:12:40 2014 +0200
    64.3 @@ -69,14 +69,10 @@
    64.4  //    number of active GC workers.  CompactibleFreeListSpace and Space
    64.5  //    have SequentialSubTasksDone's.
    64.6  // Example of using SubTasksDone and SequentialSubTasksDone
    64.7 -// G1CollectedHeap::g1_process_strong_roots() calls
    64.8 -//  process_strong_roots(false, // no scoping; this is parallel code
    64.9 -//                       is_scavenging, so,
   64.10 -//                       &buf_scan_non_heap_roots,
   64.11 -//                       &eager_scan_code_roots);
   64.12 -//  which delegates to SharedHeap::process_strong_roots() and uses
    64.13 +// G1CollectedHeap::g1_process_roots() delegates
   64.14 +//  to SharedHeap::process_roots() and uses
   64.15  //  SubTasksDone* _process_strong_tasks to claim tasks.
   64.16 -//  process_strong_roots() calls
   64.17 +//  process_roots() calls
   64.18  //      rem_set()->younger_refs_iterate()
   64.19  //  to scan the card table and which eventually calls down into
   64.20  //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
   64.21 @@ -182,12 +178,12 @@
   64.22    // task.  (This also means that a parallel thread may only call
   64.23    // process_strong_roots once.)
   64.24    //
   64.25 -  // For calls to process_strong_roots by sequential code, the parity is
   64.26 +  // For calls to process_roots by sequential code, the parity is
   64.27    // updated automatically.
   64.28    //
   64.29    // The idea is that objects representing fine-grained tasks, such as
    64.30    // threads, will contain a "parity" field.  A task will be claimed in the
   64.31 -  // current "process_strong_roots" call only if its parity field is the
   64.32 +  // current "process_roots" call only if its parity field is the
   64.33    // same as the "strong_roots_parity"; task claiming is accomplished by
   64.34    // updating the parity field to the strong_roots_parity with a CAS.
   64.35    //
   64.36 @@ -198,27 +194,44 @@
   64.37    //   c) to never return a distinguished value (zero) with which such
   64.38    //      task-claiming variables may be initialized, to indicate "never
   64.39    //      claimed".
   64.40 - private:
   64.41 -  void change_strong_roots_parity();
   64.42   public:
   64.43    int strong_roots_parity() { return _strong_roots_parity; }
   64.44  
   64.45 -  // Call these in sequential code around process_strong_roots.
   64.46 +  // Call these in sequential code around process_roots.
   64.47    // strong_roots_prologue calls change_strong_roots_parity, if
   64.48    // parallel tasks are enabled.
   64.49    class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   64.50 -  public:
   64.51 -    StrongRootsScope(SharedHeap* outer, bool activate = true);
   64.52 +    // Used to implement the Thread work barrier.
   64.53 +    static Monitor* _lock;
   64.54 +
   64.55 +    SharedHeap*   _sh;
   64.56 +    volatile jint _n_workers_done_with_threads;
   64.57 +
   64.58 +   public:
   64.59 +    StrongRootsScope(SharedHeap* heap, bool activate = true);
   64.60      ~StrongRootsScope();
   64.61 +
   64.62 +    // Mark that this thread is done with the Threads work.
   64.63 +    void mark_worker_done_with_threads(uint n_workers);
   64.64 +    // Wait until all n_workers are done with the Threads work.
   64.65 +    void wait_until_all_workers_done_with_threads(uint n_workers);
   64.66    };
   64.67    friend class StrongRootsScope;
   64.68  
    64.70 +  // The current active StrongRootsScope
   64.70 +  StrongRootsScope* _strong_roots_scope;
   64.71 +
   64.72 +  StrongRootsScope* active_strong_roots_scope() const;
   64.73 +
   64.74 + private:
   64.75 +  void register_strong_roots_scope(StrongRootsScope* scope);
   64.76 +  void unregister_strong_roots_scope(StrongRootsScope* scope);
   64.77 +  void change_strong_roots_parity();
   64.78 +
   64.79 + public:
   64.80    enum ScanningOption {
   64.81 -    SO_None                = 0x0,
   64.82 -    SO_AllClasses          = 0x1,
   64.83 -    SO_SystemClasses       = 0x2,
   64.84 -    SO_Strings             = 0x4,
   64.85 -    SO_AllCodeCache        = 0x8,
   64.86 +    SO_None                =  0x0,
   64.87 +    SO_AllCodeCache        =  0x8,
   64.88      SO_ScavengeCodeCache   = 0x10
   64.89    };
   64.90  
   64.91 @@ -227,15 +240,26 @@
    64.92    // Invoke the "do_oop" method of the closure "roots" on all root locations.
   64.93    // The "so" argument determines which roots the closure is applied to:
   64.94    // "SO_None" does none;
   64.95 -  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   64.96 -  // "SO_SystemClasses" to all the "system" classes and loaders;
   64.97 -  // "SO_Strings" applies the closure to all entries in StringTable;
   64.98    // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
   64.99    // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
  64.100 +  void process_roots(bool activate_scope,
  64.101 +                     ScanningOption so,
  64.102 +                     OopClosure* strong_roots,
  64.103 +                     OopClosure* weak_roots,
  64.104 +                     CLDClosure* strong_cld_closure,
  64.105 +                     CLDClosure* weak_cld_closure,
  64.106 +                     CodeBlobClosure* code_roots);
  64.107 +  void process_all_roots(bool activate_scope,
  64.108 +                         ScanningOption so,
  64.109 +                         OopClosure* roots,
  64.110 +                         CLDClosure* cld_closure,
  64.111 +                         CodeBlobClosure* code_roots);
  64.112    void process_strong_roots(bool activate_scope,
  64.113                              ScanningOption so,
  64.114                              OopClosure* roots,
  64.115 -                            KlassClosure* klass_closure);
  64.116 +                            CLDClosure* cld_closure,
  64.117 +                            CodeBlobClosure* code_roots);
  64.118 +
  64.119  
   64.120    // Apply "root_closure" to the JNI weak roots.
  64.121    void process_weak_roots(OopClosure* root_closure);
  64.122 @@ -251,7 +275,7 @@
  64.123    virtual void gc_epilogue(bool full) = 0;
  64.124  
  64.125    // Sets the number of parallel threads that will be doing tasks
  64.126 -  // (such as process strong roots) subsequently.
  64.127 +  // (such as process roots) subsequently.
  64.128    virtual void set_par_threads(uint t);
  64.129  
  64.130    int n_termination();
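
The reworked StrongRootsScope doubles as a work barrier for the Threads part of root scanning: each worker reports when it has finished scanning thread stacks, and any worker that must not proceed until all stacks have been visited can block on the scope. A sketch of the intended usage; the worker function itself is hypothetical, the scope calls are the ones declared above:

  // Hypothetical parallel worker body.
  void my_root_scanning_worker(SharedHeap* sh, uint n_workers) {
    SharedHeap::StrongRootsScope* scope = sh->active_strong_roots_scope();

    // ... scan the Java thread stacks claimed by this worker ...
    scope->mark_worker_done_with_threads(n_workers);

    // ... scan other root sets while the remaining workers finish ...

    // Block until every worker has reported in, e.g. before doing work that
    // assumes no thread stack is still being scanned.
    scope->wait_until_all_workers_done_with_threads(n_workers);
  }
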
    65.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Jul 01 09:03:55 2014 +0200
    65.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Mon Jul 07 10:12:40 2014 +0200
    65.3 @@ -289,6 +289,7 @@
    65.4    set_static_oop_field_count(0);
    65.5    set_nonstatic_field_size(0);
    65.6    set_is_marked_dependent(false);
    65.7 +  set_has_unloaded_dependent(false);
    65.8    set_init_state(InstanceKlass::allocated);
    65.9    set_init_thread(NULL);
   65.10    set_reference_type(rt);
   65.11 @@ -1819,6 +1820,9 @@
   65.12    return id;
   65.13  }
   65.14  
   65.15 +int nmethodBucket::decrement() {
   65.16 +  return Atomic::add(-1, (volatile int *)&_count);
   65.17 +}
   65.18  
   65.19  //
   65.20  // Walk the list of dependent nmethods searching for nmethods which
   65.21 @@ -1833,7 +1837,7 @@
   65.22      nmethod* nm = b->get_nmethod();
   65.23      // since dependencies aren't removed until an nmethod becomes a zombie,
   65.24      // the dependency list may contain nmethods which aren't alive.
   65.25 -    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
   65.26 +    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
   65.27        if (TraceDependencies) {
   65.28          ResourceMark rm;
   65.29          tty->print_cr("Marked for deoptimization");
   65.30 @@ -1850,6 +1854,43 @@
   65.31    return found;
   65.32  }
   65.33  
   65.34 +void InstanceKlass::clean_dependent_nmethods() {
   65.35 +  assert_locked_or_safepoint(CodeCache_lock);
   65.36 +
   65.37 +  if (has_unloaded_dependent()) {
   65.38 +    nmethodBucket* b = _dependencies;
   65.39 +    nmethodBucket* last = NULL;
   65.40 +    while (b != NULL) {
   65.41 +      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
   65.42 +
   65.43 +      nmethodBucket* next = b->next();
   65.44 +
   65.45 +      if (b->count() == 0) {
   65.46 +        if (last == NULL) {
   65.47 +          _dependencies = next;
   65.48 +        } else {
   65.49 +          last->set_next(next);
   65.50 +        }
   65.51 +        delete b;
   65.52 +        // last stays the same.
   65.53 +      } else {
   65.54 +        last = b;
   65.55 +      }
   65.56 +
   65.57 +      b = next;
   65.58 +    }
   65.59 +    set_has_unloaded_dependent(false);
   65.60 +  }
   65.61 +#ifdef ASSERT
   65.62 +  else {
   65.63 +    // Verification
   65.64 +    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
   65.65 +      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
   65.66 +      assert(b->count() != 0, "empty buckets need to be cleaned");
   65.67 +    }
   65.68 +  }
   65.69 +#endif
   65.70 +}
   65.71  
   65.72  //
   65.73  // Add an nmethodBucket to the list of dependencies for this nmethod.
   65.74 @@ -1884,13 +1925,10 @@
   65.75    nmethodBucket* last = NULL;
   65.76    while (b != NULL) {
   65.77      if (nm == b->get_nmethod()) {
   65.78 -      if (b->decrement() == 0) {
   65.79 -        if (last == NULL) {
   65.80 -          _dependencies = b->next();
   65.81 -        } else {
   65.82 -          last->set_next(b->next());
   65.83 -        }
   65.84 -        delete b;
   65.85 +      int val = b->decrement();
   65.86 +      guarantee(val >= 0, err_msg("Underflow: %d", val));
   65.87 +      if (val == 0) {
   65.88 +        set_has_unloaded_dependent(true);
   65.89        }
   65.90        return;
   65.91      }
   65.92 @@ -1929,6 +1967,11 @@
   65.93    nmethodBucket* b = _dependencies;
   65.94    while (b != NULL) {
   65.95      if (nm == b->get_nmethod()) {
   65.96 +#ifdef ASSERT
   65.97 +      int count = b->count();
   65.98 +      assert(count >= 0, "Just check if we ever get here 1");
   65.99 +      assert(count > 0,  "Just check if we ever get here 2");
  65.100 +#endif
  65.101        return true;
  65.102      }
  65.103      b = b->next();
  65.104 @@ -2227,7 +2270,7 @@
  65.105  #endif // INCLUDE_ALL_GCS
  65.106  
  65.107  void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
  65.108 -  assert(is_loader_alive(is_alive), "this klass should be live");
  65.109 +  assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
  65.110    if (is_interface()) {
  65.111      if (ClassUnloading) {
  65.112        Klass* impl = implementor();
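
Dependency removal is now two-phase: remove_dependent_nmethod() only decrements the bucket's count and sets _has_unloaded_dependent, while clean_dependent_nmethods() later unlinks and frees the zero-count buckets. A sketch of the resulting call sequence; the wrapper functions are illustrative, and taking CodeCache_lock explicitly is an assumption based on the assert_locked_or_safepoint() checks:

  void my_flush_dependency(InstanceKlass* ik, nmethod* nm) {
    MutexLockerEx ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    ik->remove_dependent_nmethod(nm);  // decrements the count, never unlinks
  }

  void my_prune_dependencies(InstanceKlass* ik) {
    MutexLockerEx ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    ik->clean_dependent_nmethods();    // unlinks and deletes zero-count buckets
  }
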
    66.1 --- a/src/share/vm/oops/instanceKlass.hpp	Tue Jul 01 09:03:55 2014 +0200
    66.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Mon Jul 07 10:12:40 2014 +0200
    66.3 @@ -226,6 +226,7 @@
    66.4    // _is_marked_dependent can be set concurrently, thus cannot be part of the
    66.5    // _misc_flags.
    66.6    bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
    66.7 +  bool            _has_unloaded_dependent;
    66.8  
    66.9    enum {
   66.10      _misc_rewritten            = 1 << 0, // methods rewritten.
   66.11 @@ -473,6 +474,9 @@
   66.12    bool is_marked_dependent() const         { return _is_marked_dependent; }
   66.13    void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
   66.14  
   66.15 +  bool has_unloaded_dependent() const         { return _has_unloaded_dependent; }
   66.16 +  void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
   66.17 +
   66.18    // initialization (virtuals from Klass)
   66.19    bool should_be_initialized() const;  // means that initialize should be called
   66.20    void initialize(TRAPS);
   66.21 @@ -946,6 +950,7 @@
   66.22  
   66.23    void clean_implementors_list(BoolObjectClosure* is_alive);
   66.24    void clean_method_data(BoolObjectClosure* is_alive);
   66.25 +  void clean_dependent_nmethods();
   66.26  
   66.27    // Explicit metaspace deallocation of fields
   66.28    // For RedefineClasses and class file parsing errors, we need to deallocate
   66.29 @@ -1234,7 +1239,7 @@
   66.30    }
   66.31    int count()                             { return _count; }
   66.32    int increment()                         { _count += 1; return _count; }
   66.33 -  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
   66.34 +  int decrement();
   66.35    nmethodBucket* next()                   { return _next; }
   66.36    void set_next(nmethodBucket* b)         { _next = b; }
   66.37    nmethod* get_nmethod()                  { return _nmethod; }
    67.1 --- a/src/share/vm/oops/klass.cpp	Tue Jul 01 09:03:55 2014 +0200
    67.2 +++ b/src/share/vm/oops/klass.cpp	Mon Jul 07 10:12:40 2014 +0200
    67.3 @@ -42,6 +42,7 @@
    67.4  #include "utilities/stack.hpp"
    67.5  #include "utilities/macros.hpp"
    67.6  #if INCLUDE_ALL_GCS
    67.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    67.8  #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
    67.9  #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
   67.10  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
   67.11 @@ -159,7 +160,12 @@
   67.12    _primary_supers[0] = k;
   67.13    set_super_check_offset(in_bytes(primary_supers_offset()));
   67.14  
   67.15 -  set_java_mirror(NULL);
   67.16 +  // The constructor is used from init_self_patching_vtbl_list,
   67.17 +  // which doesn't zero out the memory before calling the constructor.
   67.18 +  // Need to set the field explicitly to not hit an assert that the field
   67.19 +  // should be NULL before setting it.
   67.20 +  _java_mirror = NULL;
   67.21 +
   67.22    set_modifier_flags(0);
   67.23    set_layout_helper(Klass::_lh_neutral_value);
   67.24    set_name(NULL);
   67.25 @@ -391,7 +397,7 @@
   67.26    return mirror_alive;
   67.27  }
   67.28  
   67.29 -void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
   67.30 +void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
   67.31    if (!ClassUnloading) {
   67.32      return;
   67.33    }
   67.34 @@ -436,7 +442,7 @@
   67.35      }
   67.36  
   67.37      // Clean the implementors list and method data.
   67.38 -    if (current->oop_is_instance()) {
   67.39 +    if (clean_alive_klasses && current->oop_is_instance()) {
   67.40        InstanceKlass* ik = InstanceKlass::cast(current);
   67.41        ik->clean_implementors_list(is_alive);
   67.42        ik->clean_method_data(is_alive);
   67.43 @@ -448,12 +454,18 @@
   67.44    record_modified_oops();
   67.45  }
   67.46  
   67.47 -void Klass::klass_update_barrier_set_pre(void* p, oop v) {
   67.48 -  // This barrier used by G1, where it's used remember the old oop values,
   67.49 -  // so that we don't forget any objects that were live at the snapshot at
   67.50 -  // the beginning. This function is only used when we write oops into
   67.51 -  // Klasses. Since the Klasses are used as roots in G1, we don't have to
   67.52 -  // do anything here.
   67.53 +// This barrier is used by G1 to remember the old oop values, so
   67.54 +// that we don't forget any objects that were live at the snapshot at
   67.55 +// the beginning. This function is only used when we write oops into Klasses.
   67.56 +void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
   67.57 +#if INCLUDE_ALL_GCS
   67.58 +  if (UseG1GC) {
   67.59 +    oop obj = *p;
   67.60 +    if (obj != NULL) {
   67.61 +      G1SATBCardTableModRefBS::enqueue(obj);
   67.62 +    }
   67.63 +  }
   67.64 +#endif
   67.65  }
   67.66  
   67.67  void Klass::klass_oop_store(oop* p, oop v) {
   67.68 @@ -464,7 +476,7 @@
   67.69    if (always_do_update_barrier) {
   67.70      klass_oop_store((volatile oop*)p, v);
   67.71    } else {
   67.72 -    klass_update_barrier_set_pre((void*)p, v);
   67.73 +    klass_update_barrier_set_pre(p, v);
   67.74      *p = v;
   67.75      klass_update_barrier_set(v);
   67.76    }
   67.77 @@ -474,7 +486,7 @@
   67.78    assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
   67.79    assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
   67.80  
   67.81 -  klass_update_barrier_set_pre((void*)p, v);
   67.82 +  klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
   67.83    OrderAccess::release_store_ptr(p, v);
   67.84    klass_update_barrier_set(v);
   67.85  }
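
klass_update_barrier_set_pre() now performs a real G1 SATB pre-barrier: before an oop field in a Klass is overwritten, the old value is enqueued so concurrent marking still reaches objects that were live at the marking snapshot. A standalone sketch of the same pattern; the free function is illustrative, but UseG1GC and G1SATBCardTableModRefBS::enqueue() are the hooks used above:

  static void satb_pre_barrier_store(oop* p, oop v) {
  #if INCLUDE_ALL_GCS
    if (UseG1GC) {
      oop old = *p;
      if (old != NULL) {
        // Remember the overwritten value for the SATB marking invariant.
        G1SATBCardTableModRefBS::enqueue(old);
      }
    }
  #endif
    *p = v;  // Klass::klass_oop_store() additionally applies a post-barrier
  }
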
    68.1 --- a/src/share/vm/oops/klass.hpp	Tue Jul 01 09:03:55 2014 +0200
    68.2 +++ b/src/share/vm/oops/klass.hpp	Mon Jul 07 10:12:40 2014 +0200
    68.3 @@ -583,7 +583,10 @@
    68.4    // The is_alive closure passed in depends on the Garbage Collector used.
    68.5    bool is_loader_alive(BoolObjectClosure* is_alive);
    68.6  
    68.7 -  static void clean_weak_klass_links(BoolObjectClosure* is_alive);
    68.8 +  static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
    68.9 +  static void clean_subklass_tree(BoolObjectClosure* is_alive) {
   68.10 +    clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
   68.11 +  }
   68.12  
   68.13    // iterators
   68.14    virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
   68.15 @@ -690,7 +693,7 @@
   68.16   private:
   68.17    // barriers used by klass_oop_store
   68.18    void klass_update_barrier_set(oop v);
   68.19 -  void klass_update_barrier_set_pre(void* p, oop v);
   68.20 +  void klass_update_barrier_set_pre(oop* p, oop v);
   68.21  };
   68.22  
   68.23  #endif // SHARE_VM_OOPS_KLASS_HPP
    69.1 --- a/src/share/vm/prims/jvmtiTagMap.cpp	Tue Jul 01 09:03:55 2014 +0200
    69.2 +++ b/src/share/vm/prims/jvmtiTagMap.cpp	Mon Jul 07 10:12:40 2014 +0200
    69.3 @@ -3017,7 +3017,7 @@
    69.4  
    69.5    // If there are any non-perm roots in the code cache, visit them.
    69.6    blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
    69.7 -  CodeBlobToOopClosure look_in_blobs(&blk, false);
    69.8 +  CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
    69.9    CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
   69.10  
   69.11    return true;
    70.1 --- a/src/share/vm/prims/whitebox.cpp	Tue Jul 01 09:03:55 2014 +0200
    70.2 +++ b/src/share/vm/prims/whitebox.cpp	Mon Jul 07 10:12:40 2014 +0200
    70.3 @@ -24,6 +24,7 @@
    70.4  
    70.5  #include "precompiled.hpp"
    70.6  
    70.7 +#include "memory/metadataFactory.hpp"
    70.8  #include "memory/universe.hpp"
    70.9  #include "oops/oop.inline.hpp"
   70.10  
   70.11 @@ -36,6 +37,7 @@
   70.12  #include "runtime/arguments.hpp"
   70.13  #include "runtime/interfaceSupport.hpp"
   70.14  #include "runtime/os.hpp"
   70.15 +#include "utilities/array.hpp"
   70.16  #include "utilities/debug.hpp"
   70.17  #include "utilities/macros.hpp"
   70.18  #include "utilities/exceptions.hpp"
   70.19 @@ -725,6 +727,35 @@
   70.20  WB_END
   70.21  
   70.22  
   70.23 +int WhiteBox::array_bytes_to_length(size_t bytes) {
   70.24 +  return Array<u1>::bytes_to_length(bytes);
   70.25 +}
   70.26 +
   70.27 +WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
   70.28 +  if (size < 0) {
   70.29 +    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
   70.30 +        err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
   70.31 +  }
   70.32 +
   70.33 +  oop class_loader_oop = JNIHandles::resolve(class_loader);
   70.34 +  ClassLoaderData* cld = class_loader_oop != NULL
   70.35 +      ? java_lang_ClassLoader::loader_data(class_loader_oop)
   70.36 +      : ClassLoaderData::the_null_class_loader_data();
   70.37 +
   70.38 +  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
   70.39 +
   70.40 +  return (jlong)(uintptr_t)metadata;
   70.41 +WB_END
   70.42 +
   70.43 +WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
   70.44 +  oop class_loader_oop = JNIHandles::resolve(class_loader);
   70.45 +  ClassLoaderData* cld = class_loader_oop != NULL
   70.46 +      ? java_lang_ClassLoader::loader_data(class_loader_oop)
   70.47 +      : ClassLoaderData::the_null_class_loader_data();
   70.48 +
   70.49 +  MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
   70.50 +WB_END
   70.51 +
   70.52  //Some convenience methods to deal with objects from java
   70.53  int WhiteBox::offset_for_field(const char* field_name, oop object,
   70.54      Symbol* signature_symbol) {
   70.55 @@ -855,6 +886,10 @@
   70.56    {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   70.57    {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
   70.58    {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
   70.59 +  {CC"allocateMetaspace",
   70.60 +     CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
   70.61 +  {CC"freeMetaspace",
   70.62 +     CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
   70.63    {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   70.64    {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
   70.65                                                        (void*)&WB_GetNMethod         },
    71.1 --- a/src/share/vm/prims/whitebox.hpp	Tue Jul 01 09:03:55 2014 +0200
    71.2 +++ b/src/share/vm/prims/whitebox.hpp	Mon Jul 07 10:12:40 2014 +0200
    71.3 @@ -62,6 +62,8 @@
    71.4      Symbol* signature_symbol);
    71.5    static const char* lookup_jstring(const char* field_name, oop object);
    71.6    static bool lookup_bool(const char* field_name, oop object);
    71.7 +
    71.8 +  static int array_bytes_to_length(size_t bytes);
    71.9  };
   71.10  
   71.11  
    72.1 --- a/src/share/vm/runtime/thread.cpp	Tue Jul 01 09:03:55 2014 +0200
    72.2 +++ b/src/share/vm/runtime/thread.cpp	Mon Jul 07 10:12:40 2014 +0200
    72.3 @@ -4181,8 +4181,8 @@
    72.4    SharedHeap* sh = SharedHeap::heap();
    72.5    // Cannot yet substitute active_workers for n_par_threads
    72.6    // because of G1CollectedHeap::verify() use of
    72.7 -  // SharedHeap::process_strong_roots().  n_par_threads == 0 will
    72.8 -  // turn off parallelism in process_strong_roots while active_workers
    72.9 +  // SharedHeap::process_roots().  n_par_threads == 0 will
   72.10 +  // turn off parallelism in process_roots while active_workers
   72.11    // is being used for parallelism elsewhere.
   72.12    bool is_par = sh->n_par_threads() > 0;
   72.13    assert(!is_par ||
    73.1 --- a/src/share/vm/runtime/thread.hpp	Tue Jul 01 09:03:55 2014 +0200
    73.2 +++ b/src/share/vm/runtime/thread.hpp	Mon Jul 07 10:12:40 2014 +0200
    73.3 @@ -478,7 +478,7 @@
    73.4  private:
    73.5    bool claim_oops_do_par_case(int collection_parity);
    73.6  public:
    73.7 -  // Requires that "collection_parity" is that of the current strong roots
    73.8 +  // Requires that "collection_parity" is that of the current roots
    73.9    // iteration.  If "is_par" is false, sets the parity of "this" to
   73.10    // "collection_parity", and returns "true".  If "is_par" is true,
   73.11    // uses an atomic instruction to set the current threads parity to
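
The parity-based claiming described above boils down to a compare-and-swap on a per-task parity field. A generic sketch of the idiom; the function and field names here are made up, the real logic lives in Thread::claim_oops_do() and claim_oops_do_par_case():

  static bool claim_by_parity(volatile jint* task_parity,
                              jint collection_parity, bool is_par) {
    if (!is_par) {
      *task_parity = collection_parity;  // sequential case: always claim
      return true;
    }
    jint seen = *task_parity;
    if (seen == collection_parity) {
      return false;                      // already claimed in this iteration
    }
    // Only one worker wins the race to flip the parity.
    return Atomic::cmpxchg(collection_parity, task_parity, seen) == seen;
  }
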
    74.1 --- a/src/share/vm/utilities/array.hpp	Tue Jul 01 09:03:55 2014 +0200
    74.2 +++ b/src/share/vm/utilities/array.hpp	Mon Jul 07 10:12:40 2014 +0200
    74.3 @@ -305,6 +305,7 @@
    74.4    friend class MetadataFactory;
    74.5    friend class VMStructs;
    74.6    friend class MethodHandleCompiler;           // special case
    74.7 +  friend class WhiteBox;
    74.8  protected:
    74.9    int _length;                                 // the number of array elements
   74.10    T   _data[1];                                // the array memory
   74.11 @@ -326,6 +327,29 @@
   74.12  
   74.13    static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
   74.14  
   74.15 +  // WhiteBox API helper.
   74.16 +  static int bytes_to_length(size_t bytes)       {
   74.17 +    assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");
   74.18 +
   74.19 +    if (sizeof(Array<T>) >= bytes) {
   74.20 +      return 0;
   74.21 +    }
   74.22 +
   74.23 +    size_t left = bytes - sizeof(Array<T>);
   74.24 +    assert(is_size_aligned(left, sizeof(T)), "Must be");
   74.25 +
   74.26 +    size_t elements = left / sizeof(T);
   74.27 +    assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT " doesn't fit into an int.", elements));
   74.28 +
   74.29 +    int length = (int)elements;
   74.30 +
   74.31 +    assert((size_t)size(length) * BytesPerWord == bytes,
   74.32 +        err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
   74.33 +                bytes, (size_t)size(length) * BytesPerWord));
   74.34 +
   74.35 +    return length;
   74.36 +  }
   74.37 +
   74.38    explicit Array(int length) : _length(length) {
   74.39      assert(length >= 0, "illegal length");
   74.40    }
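
Array<T>::bytes_to_length() inverts the size calculation: given a word-aligned byte budget it returns the element count whose allocation occupies exactly those bytes, which is what the new WhiteBox metaspace-allocation entry point needs. A hedged round-trip sketch; the helper function is hypothetical and goes through the WhiteBox::array_bytes_to_length() wrapper added in whitebox.cpp, since bytes_to_length() itself is protected:

  static void my_metaspace_roundtrip(ClassLoaderData* cld, size_t bytes, TRAPS) {
    int len = WhiteBox::array_bytes_to_length(bytes);  // bytes must be word-aligned
    Array<u1>* a = MetadataFactory::new_writeable_array<u1>(cld, len, THREAD);
    assert((size_t)a->size() * BytesPerWord == bytes, "exact fit expected");
    MetadataFactory::free_array(cld, a);
  }
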
    75.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Tue Jul 01 09:03:55 2014 +0200
    75.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Mon Jul 07 10:12:40 2014 +0200
    75.3 @@ -142,6 +142,8 @@
    75.4  
    75.5    // Memory
    75.6    public native void readReservedMemory();
    75.7 +  public native long allocateMetaspace(ClassLoader classLoader, long size);
    75.8 +  public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
    75.9  
   75.10    // force Full GC
   75.11    public native void fullGC();
