6863023: need non-perm oops in code cache for JSR 292

author      jrose
date        Tue, 15 Sep 2009 21:53:47 -0700
changeset   1424 (148e5441d916)
parent      1422 (00977607da34)
child       1425 (be094e0c089a)
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
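
The heart of the change is visible in the file list and diffs below: the code
cache keeps a singly linked list threading through the few nmethods that might
embed non-perm (scavengable) oops, so a young-generation scavenge can visit
just that list instead of scanning every code blob. The following is a minimal
standalone sketch of that structure, not HotSpot code: it assumes C++14, uses
simplified names, and substitutes a function pointer for the CodeBlobClosure
used in the real patch.

    #include <cassert>
    #include <cstdio>

    struct NMethod {
      const char* name;
      NMethod*    scavenge_root_link    = nullptr;
      bool        on_scavenge_root_list = false;
    };

    // List head, standing in for CodeCache::_scavenge_root_nmethods.
    static NMethod* scavenge_root_nmethods = nullptr;

    // Prepend a freshly compiled nmethod that embeds scavengable oops
    // (cf. CodeCache::add_scavenge_root_nmethod in the diff below).
    void add_scavenge_root(NMethod* nm) {
      nm->on_scavenge_root_list = true;
      nm->scavenge_root_link = scavenge_root_nmethods;
      scavenge_root_nmethods = nm;
    }

    // Unlink one entry, e.g. when the nmethod is flushed
    // (cf. CodeCache::drop_scavenge_root_nmethod).
    void drop_scavenge_root(NMethod* nm) {
      NMethod* last = nullptr;
      for (NMethod* cur = scavenge_root_nmethods; cur != nullptr;
           cur = cur->scavenge_root_link) {
        if (cur == nm) {
          if (last != nullptr) last->scavenge_root_link = cur->scavenge_root_link;
          else                 scavenge_root_nmethods   = cur->scavenge_root_link;
          nm->scavenge_root_link = nullptr;
          nm->on_scavenge_root_list = false;
          return;
        }
        last = cur;
      }
      assert(false && "should have been on list");
    }

    // A scavenge walks only this short list, not the whole code cache
    // (cf. CodeCache::scavenge_root_nmethods_do).
    void scavenge_root_nmethods_do(void (*f)(NMethod*)) {
      for (NMethod* cur = scavenge_root_nmethods; cur != nullptr;
           cur = cur->scavenge_root_link)
        f(cur);
    }

    int main() {
      NMethod a{"a"}, b{"b"};
      add_scavenge_root(&a);
      add_scavenge_root(&b);
      scavenge_root_nmethods_do([](NMethod* nm) {
        std::printf("scavenge root: %s\n", nm->name);
      });
      drop_scavenge_root(&b);
      drop_scavenge_root(&a);
      return 0;
    }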

agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java
agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
src/cpu/x86/vm/x86_32.ad
src/cpu/x86/vm/x86_64.ad
src/share/vm/c1/c1_GraphBuilder.cpp
src/share/vm/c1/c1_InstructionPrinter.cpp
src/share/vm/c1/c1_LIRGenerator.cpp
src/share/vm/c1/c1_ValueType.cpp
src/share/vm/ci/ciEnv.cpp
src/share/vm/ci/ciEnv.hpp
src/share/vm/ci/ciObject.cpp
src/share/vm/ci/ciObject.hpp
src/share/vm/ci/ciObjectFactory.cpp
src/share/vm/classfile/systemDictionary.cpp
src/share/vm/code/codeBlob.hpp
src/share/vm/code/codeCache.cpp
src/share/vm/code/codeCache.hpp
src/share/vm/code/debugInfoRec.cpp
src/share/vm/code/dependencies.cpp
src/share/vm/code/nmethod.cpp
src/share/vm/code/nmethod.hpp
src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
src/share/vm/gc_implementation/g1/concurrentMark.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
src/share/vm/gc_implementation/shared/markSweep.cpp
src/share/vm/gc_implementation/shared/markSweep.hpp
src/share/vm/gc_interface/collectedHeap.hpp
src/share/vm/memory/defNewGeneration.cpp
src/share/vm/memory/genCollectedHeap.cpp
src/share/vm/memory/genCollectedHeap.hpp
src/share/vm/memory/genMarkSweep.cpp
src/share/vm/memory/iterator.cpp
src/share/vm/memory/iterator.hpp
src/share/vm/memory/sharedHeap.cpp
src/share/vm/memory/sharedHeap.hpp
src/share/vm/oops/instanceKlass.cpp
src/share/vm/oops/oop.hpp
src/share/vm/oops/oop.inline2.hpp
src/share/vm/opto/output.cpp
src/share/vm/opto/parse.hpp
src/share/vm/opto/parse2.cpp
src/share/vm/opto/parse3.cpp
src/share/vm/opto/type.cpp
src/share/vm/opto/type.hpp
src/share/vm/prims/jvmtiTagMap.cpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/frame.cpp
src/share/vm/runtime/frame.hpp
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/sweeper.cpp
src/share/vm/runtime/thread.cpp
src/share/vm/runtime/thread.hpp
src/share/vm/runtime/vmStructs.cpp
src/share/vm/runtime/vmThread.cpp
src/share/vm/runtime/vmThread.hpp
src/share/vm/utilities/debug.cpp
     1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Tue Sep 15 11:09:34 2009 -0700
     1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Tue Sep 15 21:53:47 2009 -0700
     1.3 @@ -33,6 +33,7 @@
     1.4  
     1.5  public class CodeCache {
     1.6    private static AddressField       heapField;
     1.7 +  private static AddressField       scavengeRootNMethodsField;
     1.8    private static VirtualConstructor virtualConstructor;
     1.9  
    1.10    private CodeHeap heap;
    1.11 @@ -49,6 +50,7 @@
    1.12      Type type = db.lookupType("CodeCache");
    1.13  
    1.14      heapField = type.getAddressField("_heap");
    1.15 +    scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
    1.16  
    1.17      virtualConstructor = new VirtualConstructor(db);
    1.18      // Add mappings for all possible CodeBlob subclasses
    1.19 @@ -67,6 +69,10 @@
    1.20      heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
    1.21    }
    1.22  
    1.23 +  public NMethod scavengeRootMethods() {
    1.24 +    return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
    1.25 +  }
    1.26 +
    1.27    public boolean contains(Address p) {
    1.28      return getHeap().contains(p);
    1.29    }
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Sep 15 11:09:34 2009 -0700
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Sep 15 21:53:47 2009 -0700
     2.3 @@ -40,7 +40,10 @@
     2.4    /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
     2.5    private static CIntegerField entryBCIField;
     2.6    /** To support simple linked-list chaining of nmethods */
     2.7 -  private static AddressField  linkField;
     2.8 +  private static AddressField  osrLinkField;
     2.9 +  private static AddressField  scavengeRootLinkField;
    2.10 +  private static CIntegerField scavengeRootStateField;
    2.11 +
    2.12    /** Offsets for different nmethod parts */
    2.13    private static CIntegerField exceptionOffsetField;
    2.14    private static CIntegerField deoptOffsetField;
    2.15 @@ -87,7 +90,10 @@
    2.16      zombieInstructionSizeField  = type.getCIntegerField("_zombie_instruction_size");
    2.17      methodField                 = type.getOopField("_method");
    2.18      entryBCIField               = type.getCIntegerField("_entry_bci");
    2.19 -    linkField                   = type.getAddressField("_link");
    2.20 +    osrLinkField                = type.getAddressField("_osr_link");
    2.21 +    scavengeRootLinkField       = type.getAddressField("_scavenge_root_link");
    2.22 +    scavengeRootStateField      = type.getCIntegerField("_scavenge_root_state");
    2.23 +
    2.24      exceptionOffsetField        = type.getCIntegerField("_exception_offset");
    2.25      deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
    2.26      origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
    2.27 @@ -219,10 +225,19 @@
    2.28      return getEntryBCI();
    2.29    }
    2.30  
    2.31 -  public NMethod getLink() {
    2.32 -    return (NMethod) VMObjectFactory.newObject(NMethod.class, linkField.getValue(addr));
    2.33 +  public NMethod getOSRLink() {
    2.34 +    return (NMethod) VMObjectFactory.newObject(NMethod.class, osrLinkField.getValue(addr));
    2.35    }
    2.36  
    2.37 +  public NMethod getScavengeRootLink() {
    2.38 +    return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootLinkField.getValue(addr));
    2.39 +  }
    2.40 +
    2.41 +  public int getScavengeRootState() {
    2.42 +    return (int) scavengeRootStateField.getValue(addr);
    2.43 +  }
    2.44 +
    2.45 +
    2.46    /** Tells whether frames described by this nmethod can be
    2.47        deoptimized. Note: native wrappers cannot be deoptimized. */
    2.48    public boolean canBeDeoptimized() { return isJavaMethod(); }
     3.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Sep 15 11:09:34 2009 -0700
     3.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Sep 15 21:53:47 2009 -0700
     3.3 @@ -2171,7 +2171,7 @@
     3.4      // subtype which we can't check or src is the same array as dst
     3.5      // but not necessarily exactly of type default_type.
     3.6      Label known_ok, halt;
     3.7 -    jobject2reg(op->expected_type()->encoding(), tmp);
     3.8 +    jobject2reg(op->expected_type()->constant_encoding(), tmp);
     3.9      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    3.10      if (basic_type != T_OBJECT) {
    3.11        __ cmp(tmp, tmp2);
    3.12 @@ -2429,7 +2429,7 @@
    3.13        assert(data->is_BitData(), "need BitData for checkcast");
    3.14        Register mdo      = k_RInfo;
    3.15        Register data_val = Rtmp1;
    3.16 -      jobject2reg(md->encoding(), mdo);
    3.17 +      jobject2reg(md->constant_encoding(), mdo);
    3.18  
    3.19        int mdo_offset_bias = 0;
    3.20        if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    3.21 @@ -2452,7 +2452,7 @@
    3.22      // patching may screw with our temporaries on sparc,
    3.23      // so let's do it before loading the class
    3.24      if (k->is_loaded()) {
    3.25 -      jobject2reg(k->encoding(), k_RInfo);
    3.26 +      jobject2reg(k->constant_encoding(), k_RInfo);
    3.27      } else {
    3.28        jobject2reg_with_patching(k_RInfo, op->info_for_patch());
    3.29      }
    3.30 @@ -2513,7 +2513,7 @@
    3.31      // patching may screw with our temporaries on sparc,
    3.32      // so let's do it before loading the class
    3.33      if (k->is_loaded()) {
    3.34 -      jobject2reg(k->encoding(), k_RInfo);
    3.35 +      jobject2reg(k->constant_encoding(), k_RInfo);
    3.36      } else {
    3.37        jobject2reg_with_patching(k_RInfo, op->info_for_patch());
    3.38      }
    3.39 @@ -2717,7 +2717,7 @@
    3.40    assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
    3.41    Register mdo  = op->mdo()->as_register();
    3.42    Register tmp1 = op->tmp1()->as_register();
    3.43 -  jobject2reg(md->encoding(), mdo);
    3.44 +  jobject2reg(md->constant_encoding(), mdo);
    3.45    int mdo_offset_bias = 0;
    3.46    if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
    3.47                              data->size_in_bytes())) {
    3.48 @@ -2774,7 +2774,7 @@
    3.49          if (receiver == NULL) {
    3.50            Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
    3.51                              mdo_offset_bias);
    3.52 -          jobject2reg(known_klass->encoding(), tmp1);
    3.53 +          jobject2reg(known_klass->constant_encoding(), tmp1);
    3.54            __ st_ptr(tmp1, recv_addr);
    3.55            Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
    3.56                              mdo_offset_bias);
     4.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Sep 15 11:09:34 2009 -0700
     4.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Sep 15 21:53:47 2009 -0700
     4.3 @@ -896,7 +896,7 @@
     4.4    LIR_Opr len = length.result();
     4.5    BasicType elem_type = x->elt_type();
     4.6  
     4.7 -  __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
     4.8 +  __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
     4.9  
    4.10    CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
    4.11    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
     5.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Sep 15 11:09:34 2009 -0700
     5.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Sep 15 21:53:47 2009 -0700
     5.3 @@ -1638,7 +1638,7 @@
     5.4        jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     5.5      } else {
     5.6  #ifdef _LP64
     5.7 -      __ movoop(k_RInfo, k->encoding());
     5.8 +      __ movoop(k_RInfo, k->constant_encoding());
     5.9  #else
    5.10        k_RInfo = noreg;
    5.11  #endif // _LP64
    5.12 @@ -1661,7 +1661,7 @@
    5.13        assert(data != NULL,       "need data for checkcast");
    5.14        assert(data->is_BitData(), "need BitData for checkcast");
    5.15        Register mdo  = klass_RInfo;
    5.16 -      __ movoop(mdo, md->encoding());
    5.17 +      __ movoop(mdo, md->constant_encoding());
    5.18        Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    5.19        int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    5.20        __ orl(data_addr, header_bits);
    5.21 @@ -1679,7 +1679,7 @@
    5.22  #ifdef _LP64
    5.23          __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    5.24  #else
    5.25 -        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
    5.26 +        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    5.27  #endif // _LP64
    5.28        } else {
    5.29          __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    5.30 @@ -1696,7 +1696,7 @@
    5.31  #ifdef _LP64
    5.32          __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
    5.33  #else
    5.34 -        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
    5.35 +        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
    5.36  #endif // _LP64
    5.37          if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
    5.38            __ jcc(Assembler::notEqual, *stub->entry());
    5.39 @@ -1707,7 +1707,7 @@
    5.40  #ifdef _LP64
    5.41            __ cmpptr(klass_RInfo, k_RInfo);
    5.42  #else
    5.43 -          __ cmpoop(klass_RInfo, k->encoding());
    5.44 +          __ cmpoop(klass_RInfo, k->constant_encoding());
    5.45  #endif // _LP64
    5.46            __ jcc(Assembler::equal, done);
    5.47  
    5.48 @@ -1715,7 +1715,7 @@
    5.49  #ifdef _LP64
    5.50            __ push(k_RInfo);
    5.51  #else
    5.52 -          __ pushoop(k->encoding());
    5.53 +          __ pushoop(k->constant_encoding());
    5.54  #endif // _LP64
    5.55            __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    5.56            __ pop(klass_RInfo);
    5.57 @@ -1763,7 +1763,7 @@
    5.58      if (!k->is_loaded()) {
    5.59        jobject2reg_with_patching(k_RInfo, op->info_for_patch());
    5.60      } else {
    5.61 -      LP64_ONLY(__ movoop(k_RInfo, k->encoding()));
    5.62 +      LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
    5.63      }
    5.64      assert(obj != k_RInfo, "must be different");
    5.65  
    5.66 @@ -1774,7 +1774,7 @@
    5.67        // get object class
    5.68        // not a safepoint as obj null check happens earlier
    5.69        if (LP64_ONLY(false &&) k->is_loaded()) {
    5.70 -        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()));
    5.71 +        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
    5.72          k_RInfo = noreg;
    5.73        } else {
    5.74          __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    5.75 @@ -1791,14 +1791,14 @@
    5.76  #ifndef _LP64
    5.77        if (k->is_loaded()) {
    5.78          // See if we get an immediate positive hit
    5.79 -        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
    5.80 +        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
    5.81          __ jcc(Assembler::equal, one);
    5.82          if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
    5.83            // check for self
    5.84 -          __ cmpoop(klass_RInfo, k->encoding());
    5.85 +          __ cmpoop(klass_RInfo, k->constant_encoding());
    5.86            __ jcc(Assembler::equal, one);
    5.87            __ push(klass_RInfo);
    5.88 -          __ pushoop(k->encoding());
    5.89 +          __ pushoop(k->constant_encoding());
    5.90            __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    5.91            __ pop(klass_RInfo);
    5.92            __ pop(dst);
    5.93 @@ -3112,7 +3112,7 @@
    5.94      // subtype which we can't check or src is the same array as dst
    5.95      // but not necessarily exactly of type default_type.
    5.96      Label known_ok, halt;
    5.97 -    __ movoop(tmp, default_type->encoding());
    5.98 +    __ movoop(tmp, default_type->constant_encoding());
    5.99      if (basic_type != T_OBJECT) {
   5.100        __ cmpptr(tmp, dst_klass_addr);
   5.101        __ jcc(Assembler::notEqual, halt);
   5.102 @@ -3200,7 +3200,7 @@
   5.103    assert(data->is_CounterData(), "need CounterData for calls");
   5.104    assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
   5.105    Register mdo  = op->mdo()->as_register();
   5.106 -  __ movoop(mdo, md->encoding());
   5.107 +  __ movoop(mdo, md->constant_encoding());
   5.108    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
   5.109    __ addl(counter_addr, DataLayout::counter_increment);
   5.110    Bytecodes::Code bc = method->java_code_at_bci(bci);
   5.111 @@ -3240,7 +3240,7 @@
   5.112          ciKlass* receiver = vc_data->receiver(i);
   5.113          if (receiver == NULL) {
   5.114            Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
   5.115 -          __ movoop(recv_addr, known_klass->encoding());
   5.116 +          __ movoop(recv_addr, known_klass->constant_encoding());
   5.117            Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
   5.118            __ addl(data_addr, DataLayout::counter_increment);
   5.119            return;
     6.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 15 11:09:34 2009 -0700
     6.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 15 21:53:47 2009 -0700
     6.3 @@ -994,7 +994,7 @@
     6.4    LIR_Opr len = length.result();
     6.5    BasicType elem_type = x->elt_type();
     6.6  
     6.7 -  __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
     6.8 +  __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
     6.9  
    6.10    CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
    6.11    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
     7.1 --- a/src/cpu/x86/vm/x86_32.ad	Tue Sep 15 11:09:34 2009 -0700
     7.2 +++ b/src/cpu/x86/vm/x86_32.ad	Tue Sep 15 21:53:47 2009 -0700
     7.3 @@ -379,7 +379,7 @@
     7.4          int format) {
     7.5  #ifdef ASSERT
     7.6    if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
     7.7 -    assert(oop(d32)->is_oop() && oop(d32)->is_perm(), "cannot embed non-perm oops in code");
     7.8 +    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
     7.9    }
    7.10  #endif
    7.11    cbuf.relocate(cbuf.inst_mark(), rspec, format);
     8.1 --- a/src/cpu/x86/vm/x86_64.ad	Tue Sep 15 11:09:34 2009 -0700
     8.2 +++ b/src/cpu/x86/vm/x86_64.ad	Tue Sep 15 21:53:47 2009 -0700
     8.3 @@ -683,7 +683,7 @@
     8.4  #ifdef ASSERT
     8.5    if (rspec.reloc()->type() == relocInfo::oop_type &&
     8.6        d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     8.7 -    assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
     8.8 +    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
     8.9    }
    8.10  #endif
    8.11    cbuf.relocate(cbuf.inst_mark(), rspec, format);
    8.12 @@ -721,8 +721,8 @@
    8.13  #ifdef ASSERT
    8.14    if (rspec.reloc()->type() == relocInfo::oop_type &&
    8.15        d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
    8.16 -    assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
    8.17 -           "cannot embed non-perm oops in code");
    8.18 +    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
    8.19 +           "cannot embed scavengable oops in code");
    8.20    }
    8.21  #endif
    8.22    cbuf.relocate(cbuf.inst_mark(), rspec, format);
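
The assertion rewritten in x86_32.ad and x86_64.ad above amounts to one
predicate: an oop may be embedded in code if it is not scavengable, or if
ScavengeRootsInCode is on. A hedged restatement as a standalone helper, with
hypothetical parameter names standing in for oopDesc::is_oop(),
oopDesc::is_scavengable() and the ScavengeRootsInCode flag:

    // Old rule: is_oop && is_perm        ("cannot embed non-perm oops in code")
    // New rule: is_oop && (ScavengeRootsInCode || !is_scavengable)
    //                                     ("cannot embed scavengable oops in code")
    inline bool embeddable_in_code(bool is_oop, bool is_scavengable,
                                   int scavenge_roots_in_code) {
      return is_oop && (scavenge_roots_in_code != 0 || !is_scavengable);
    }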
     9.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Sep 15 11:09:34 2009 -0700
     9.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Sep 15 21:53:47 2009 -0700
     9.3 @@ -1442,7 +1442,7 @@
     9.4          switch (field_type) {
     9.5          case T_ARRAY:
     9.6          case T_OBJECT:
     9.7 -          if (field_val.as_object()->has_encoding()) {
     9.8 +          if (field_val.as_object()->should_be_constant()) {
     9.9              constant =  new Constant(as_ValueType(field_val));
    9.10            }
    9.11            break;
    10.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Sep 15 11:09:34 2009 -0700
    10.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Sep 15 21:53:47 2009 -0700
    10.3 @@ -133,12 +133,12 @@
    10.4        ciMethod* m = (ciMethod*)value;
    10.5        output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    10.6      } else {
    10.7 -      output()->print("<object 0x%x>", value->encoding());
    10.8 +      output()->print("<object 0x%x>", value->constant_encoding());
    10.9      }
   10.10    } else if (type->as_InstanceConstant() != NULL) {
   10.11 -    output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->encoding());
   10.12 +    output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->constant_encoding());
   10.13    } else if (type->as_ArrayConstant() != NULL) {
   10.14 -    output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->encoding());
   10.15 +    output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->constant_encoding());
   10.16    } else if (type->as_ClassConstant() != NULL) {
   10.17      ciInstanceKlass* klass = type->as_ClassConstant()->value();
   10.18      if (!klass->is_loaded()) {
    11.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 15 11:09:34 2009 -0700
    11.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 15 21:53:47 2009 -0700
    11.3 @@ -440,7 +440,7 @@
    11.4      __ oop2reg_patch(NULL, r, info);
    11.5    } else {
    11.6      // no patching needed
    11.7 -    __ oop2reg(obj->encoding(), r);
    11.8 +    __ oop2reg(obj->constant_encoding(), r);
    11.9    }
   11.10  }
   11.11  
   11.12 @@ -831,7 +831,7 @@
   11.13      int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
   11.14      int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
   11.15      LIR_Opr md_reg = new_register(T_OBJECT);
   11.16 -    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
   11.17 +    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
   11.18      LIR_Opr data_offset_reg = new_register(T_INT);
   11.19      __ cmove(lir_cond(cond),
   11.20               LIR_OprFact::intConst(taken_count_offset),
   11.21 @@ -1071,7 +1071,7 @@
   11.22      LIR_OprList* args = new LIR_OprList();
   11.23      args->append(getThreadPointer());
   11.24      LIR_Opr meth = new_register(T_OBJECT);
   11.25 -    __ oop2reg(method()->encoding(), meth);
   11.26 +    __ oop2reg(method()->constant_encoding(), meth);
   11.27      args->append(meth);
   11.28      call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
   11.29    }
   11.30 @@ -1784,7 +1784,7 @@
   11.31      LIR_OprList* args = new LIR_OprList();
   11.32      args->append(getThreadPointer());
   11.33      LIR_Opr meth = new_register(T_OBJECT);
   11.34 -    __ oop2reg(method()->encoding(), meth);
   11.35 +    __ oop2reg(method()->constant_encoding(), meth);
   11.36      args->append(meth);
   11.37      call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
   11.38    }
   11.39 @@ -2207,7 +2207,7 @@
   11.40      LIR_OprList* args = new LIR_OprList();
   11.41      args->append(getThreadPointer());
   11.42      LIR_Opr meth = new_register(T_OBJECT);
   11.43 -    __ oop2reg(method()->encoding(), meth);
   11.44 +    __ oop2reg(method()->constant_encoding(), meth);
   11.45      args->append(meth);
   11.46      call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
   11.47    }
   11.48 @@ -2216,7 +2216,7 @@
   11.49      LIR_Opr obj;
   11.50      if (method()->is_static()) {
   11.51        obj = new_register(T_OBJECT);
   11.52 -      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
   11.53 +      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
   11.54      } else {
   11.55        Local* receiver = x->state()->local_at(0)->as_Local();
   11.56        assert(receiver != NULL, "must already exist");
   11.57 @@ -2660,7 +2660,7 @@
   11.58      }
   11.59  
   11.60      LIR_Opr meth = new_register(T_OBJECT);
   11.61 -    __ oop2reg(method()->encoding(), meth);
   11.62 +    __ oop2reg(method()->constant_encoding(), meth);
   11.63      LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
   11.64      __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
   11.65      CodeStub* overflow = new CounterOverflowStub(info, info->bci());
    12.1 --- a/src/share/vm/c1/c1_ValueType.cpp	Tue Sep 15 11:09:34 2009 -0700
    12.2 +++ b/src/share/vm/c1/c1_ValueType.cpp	Tue Sep 15 21:53:47 2009 -0700
    12.3 @@ -86,7 +86,7 @@
    12.4  
    12.5  jobject ObjectType::encoding() const {
    12.6    assert(is_constant(), "must be");
    12.7 -  return constant_value()->encoding();
    12.8 +  return constant_value()->constant_encoding();
    12.9  }
   12.10  
   12.11  bool ObjectType::is_loaded() const {
    13.1 --- a/src/share/vm/ci/ciEnv.cpp	Tue Sep 15 11:09:34 2009 -0700
    13.2 +++ b/src/share/vm/ci/ciEnv.cpp	Tue Sep 15 21:53:47 2009 -0700
    13.3 @@ -257,7 +257,7 @@
    13.4  
    13.5  // ------------------------------------------------------------------
    13.6  // ciEnv::make_array
    13.7 -ciArray* ciEnv::make_array(GrowableArray<ciObject*>* objects) {
    13.8 +ciArray* ciEnv::make_system_array(GrowableArray<ciObject*>* objects) {
    13.9    VM_ENTRY_MARK;
   13.10    int length = objects->length();
   13.11    objArrayOop a = oopFactory::new_system_objArray(length, THREAD);
    14.1 --- a/src/share/vm/ci/ciEnv.hpp	Tue Sep 15 11:09:34 2009 -0700
    14.2 +++ b/src/share/vm/ci/ciEnv.hpp	Tue Sep 15 21:53:47 2009 -0700
    14.3 @@ -339,8 +339,8 @@
    14.4    // but consider adding to vmSymbols.hpp instead.
    14.5  
    14.6    // Use this to make a holder for non-perm compile time constants.
    14.7 -  // The resulting array is guaranteed to satisfy "has_encoding".
    14.8 -  ciArray*  make_array(GrowableArray<ciObject*>* objects);
    14.9 +  // The resulting array is guaranteed to satisfy "can_be_constant".
   14.10 +  ciArray*  make_system_array(GrowableArray<ciObject*>* objects);
   14.11  
   14.12    // converts the ciKlass* representing the holder of a method into a
   14.13    // ciInstanceKlass*.  This is needed since the holder of a method in
    15.1 --- a/src/share/vm/ci/ciObject.cpp	Tue Sep 15 11:09:34 2009 -0700
    15.2 +++ b/src/share/vm/ci/ciObject.cpp	Tue Sep 15 21:53:47 2009 -0700
    15.3 @@ -55,6 +55,7 @@
    15.4    }
    15.5    _klass = NULL;
    15.6    _ident = 0;
    15.7 +  init_flags_from(o);
    15.8  }
    15.9  
   15.10  // ------------------------------------------------------------------
   15.11 @@ -69,6 +70,7 @@
   15.12    }
   15.13    _klass = NULL;
   15.14    _ident = 0;
   15.15 +  init_flags_from(h());
   15.16  }
   15.17  
   15.18  // ------------------------------------------------------------------
   15.19 @@ -158,7 +160,7 @@
   15.20  }
   15.21  
   15.22  // ------------------------------------------------------------------
   15.23 -// ciObject::encoding
   15.24 +// ciObject::constant_encoding
   15.25  //
   15.26  // The address which the compiler should embed into the
   15.27  // generated code to represent this oop.  This address
   15.28 @@ -172,16 +174,24 @@
   15.29  //
   15.30  // This method should be changed to return an generified address
   15.31  // to discourage use of the JNI handle.
   15.32 -jobject ciObject::encoding() {
   15.33 +jobject ciObject::constant_encoding() {
   15.34    assert(is_null_object() || handle() != NULL, "cannot embed null pointer");
   15.35 -  assert(has_encoding(), "oop must be NULL or perm");
   15.36 +  assert(can_be_constant(), "oop must be NULL or perm");
   15.37    return handle();
   15.38  }
   15.39  
   15.40  // ------------------------------------------------------------------
   15.41 -// ciObject::has_encoding
   15.42 -bool ciObject::has_encoding() {
   15.43 -  return handle() == NULL || is_perm();
   15.44 +// ciObject::can_be_constant
   15.45 +bool ciObject::can_be_constant() {
   15.46 +  if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
   15.47 +  return handle() == NULL || !is_scavengable();
   15.48 +}
   15.49 +
   15.50 +// ------------------------------------------------------------------
   15.51 +// ciObject::should_be_constant()
   15.52 +bool ciObject::should_be_constant() {
   15.53 +  if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
   15.54 +  return handle() == NULL || !is_scavengable();
   15.55  }
   15.56  
   15.57  
   15.58 @@ -195,8 +205,9 @@
   15.59  void ciObject::print(outputStream* st) {
   15.60    st->print("<%s", type_string());
   15.61    GUARDED_VM_ENTRY(print_impl(st);)
   15.62 -  st->print(" ident=%d %s address=0x%x>", ident(),
   15.63 +  st->print(" ident=%d %s%s address=0x%x>", ident(),
   15.64          is_perm() ? "PERM" : "",
   15.65 +        is_scavengable() ? "SCAVENGABLE" : "",
   15.66          (address)this);
   15.67  }
   15.68  
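
The two predicates added to ciObject.cpp above give ScavengeRootsInCode a
three-level meaning: 0 keeps the old rule, 1 merely permits scavengable oops
as constants, 2 forces everything to be a constant. A compact restatement
under those assumptions, with handle_is_null and is_scavengable standing in
for the ciObject queries:

    // Mirrors ciObject::can_be_constant above.
    bool can_be_constant(int scavenge_roots_in_code,
                         bool handle_is_null, bool is_scavengable) {
      if (scavenge_roots_in_code >= 1) return true;  // everybody may be a constant
      return handle_is_null || !is_scavengable;      // else NULL or non-scavengable only
    }

    // Mirrors ciObject::should_be_constant above.
    bool should_be_constant(int scavenge_roots_in_code,
                            bool handle_is_null, bool is_scavengable) {
      if (scavenge_roots_in_code >= 2) return true;  // force everybody to be a constant
      return handle_is_null || !is_scavengable;
    }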
    16.1 --- a/src/share/vm/ci/ciObject.hpp	Tue Sep 15 11:09:34 2009 -0700
    16.2 +++ b/src/share/vm/ci/ciObject.hpp	Tue Sep 15 21:53:47 2009 -0700
    16.3 @@ -51,9 +51,10 @@
    16.4    ciKlass* _klass;
    16.5    uint     _ident;
    16.6  
    16.7 -  enum { FLAG_BITS   = 1};
    16.8 +  enum { FLAG_BITS   = 2 };
    16.9    enum {
   16.10 -         PERM_FLAG    = 1
   16.11 +         PERM_FLAG        = 1,
   16.12 +         SCAVENGABLE_FLAG = 2
   16.13         };
   16.14  protected:
   16.15    ciObject();
   16.16 @@ -68,8 +69,15 @@
   16.17      return JNIHandles::resolve_non_null(_handle);
   16.18    }
   16.19  
   16.20 -  void set_perm() {
   16.21 -    _ident |=  PERM_FLAG;
   16.22 +  void init_flags_from(oop x) {
   16.23 +    int flags = 0;
   16.24 +    if (x != NULL) {
   16.25 +      if (x->is_perm())
   16.26 +        flags |= PERM_FLAG;
   16.27 +      if (x->is_scavengable())
   16.28 +        flags |= SCAVENGABLE_FLAG;
   16.29 +    }
   16.30 +    _ident |= flags;
   16.31    }
   16.32  
   16.33    // Virtual behavior of the print() method.
   16.34 @@ -91,17 +99,27 @@
   16.35    // A hash value for the convenience of compilers.
   16.36    int hash();
   16.37  
   16.38 -  // Tells if this oop has an encoding.  (I.e., is it null or perm?)
   16.39 +  // Tells if this oop has an encoding as a constant.
   16.40 +  // True if is_scavengable is false.
   16.41 +  // Also true if ScavengeRootsInCode is non-zero.
   16.42    // If it does not have an encoding, the compiler is responsible for
   16.43    // making other arrangements for dealing with the oop.
   16.44 -  // See ciEnv::make_perm_array
   16.45 -  bool has_encoding();
   16.46 +  // See ciEnv::make_array
   16.47 +  bool can_be_constant();
   16.48 +
   16.49 +  // Tells if this oop should be made a constant.
   16.50 +  // True if is_scavengable is false or ScavengeRootsInCode > 1.
   16.51 +  bool should_be_constant();
   16.52  
   16.53    // Is this object guaranteed to be in the permanent part of the heap?
   16.54    // If so, CollectedHeap::can_elide_permanent_oop_store_barriers is relevant.
   16.55    // If the answer is false, no guarantees are made.
   16.56    bool is_perm() { return (_ident & PERM_FLAG) != 0; }
   16.57  
   16.58 +  // Might this object possibly move during a scavenge operation?
   16.59 +  // If the answer is true and ScavengeRootsInCode==0, the oop cannot be embedded in code.
   16.60 +  bool is_scavengable() { return (_ident & SCAVENGABLE_FLAG) != 0; }
   16.61 +
   16.62    // The address which the compiler should embed into the
   16.63    // generated code to represent this oop.  This address
   16.64    // is not the true address of the oop -- it will get patched
   16.65 @@ -109,7 +127,7 @@
   16.66    //
   16.67    // Usage note: no address arithmetic allowed.  Oop must
   16.68    // be registered with the oopRecorder.
   16.69 -  jobject encoding();
   16.70 +  jobject constant_encoding();
   16.71  
   16.72    // What kind of ciObject is this?
   16.73    virtual bool is_null_object() const       { return false; }
    17.1 --- a/src/share/vm/ci/ciObjectFactory.cpp	Tue Sep 15 11:09:34 2009 -0700
    17.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp	Tue Sep 15 21:53:47 2009 -0700
    17.3 @@ -261,12 +261,11 @@
    17.4      ciObject* new_object = create_new_object(keyHandle());
    17.5      assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
    17.6      init_ident_of(new_object);
    17.7 -    if (!keyHandle->is_perm()) {
    17.8 +    if (!new_object->is_perm()) {
    17.9        // Not a perm-space object.
   17.10        insert_non_perm(bucket, keyHandle(), new_object);
   17.11        return new_object;
   17.12      }
   17.13 -    new_object->set_perm();
   17.14      if (len != _ci_objects->length()) {
   17.15        // creating the new object has recursively entered new objects
   17.16        // into the table.  We need to recompute our index.
    18.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 15 11:09:34 2009 -0700
    18.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 15 21:53:47 2009 -0700
    18.3 @@ -2414,6 +2414,8 @@
    18.4                           vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
    18.5                           &args, CHECK_(empty));
    18.6    oop call_site_oop = (oop) result.get_jobject();
    18.7 +  assert(call_site_oop->is_oop()
    18.8 +         /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
    18.9    sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
   18.10    if (TraceMethodHandles) {
   18.11      tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
   18.12 @@ -2450,6 +2452,8 @@
   18.13    oop boot_method_oop = (oop) result.get_jobject();
   18.14  
   18.15    if (boot_method_oop != NULL) {
   18.16 +    assert(boot_method_oop->is_oop()
   18.17 +           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
   18.18      // probably no race conditions, but let's be careful:
   18.19      if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
   18.20        ik->set_bootstrap_method(boot_method_oop);
    19.1 --- a/src/share/vm/code/codeBlob.hpp	Tue Sep 15 11:09:34 2009 -0700
    19.2 +++ b/src/share/vm/code/codeBlob.hpp	Tue Sep 15 21:53:47 2009 -0700
    19.3 @@ -175,6 +175,8 @@
    19.4                              OopClosure* keep_alive,
    19.5                              bool unloading_occurred);
    19.6    virtual void oops_do(OopClosure* f) = 0;
    19.7 +  // (All CodeBlob subtypes other than NMethod currently have
     19.8 +  // an empty oops_do() method.)
    19.9  
   19.10    // OopMap for frame
   19.11    OopMapSet* oop_maps() const                    { return _oop_maps; }
    20.1 --- a/src/share/vm/code/codeCache.cpp	Tue Sep 15 11:09:34 2009 -0700
    20.2 +++ b/src/share/vm/code/codeCache.cpp	Tue Sep 15 21:53:47 2009 -0700
    20.3 @@ -95,6 +95,7 @@
    20.4  int CodeCache::_number_of_blobs = 0;
    20.5  int CodeCache::_number_of_nmethods_with_dependencies = 0;
    20.6  bool CodeCache::_needs_cache_clean = false;
    20.7 +nmethod* CodeCache::_scavenge_root_nmethods = NULL;
    20.8  
    20.9  
   20.10  CodeBlob* CodeCache::first() {
   20.11 @@ -148,10 +149,7 @@
   20.12      }
   20.13    }
   20.14    verify_if_often();
   20.15 -  if (PrintCodeCache2) {        // Need to add a new flag
   20.16 -      ResourceMark rm;
   20.17 -      tty->print_cr("CodeCache allocation:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size);
   20.18 -  }
   20.19 +  print_trace("allocation", cb, size);
   20.20    return cb;
   20.21  }
   20.22  
   20.23 @@ -159,10 +157,7 @@
   20.24    assert_locked_or_safepoint(CodeCache_lock);
   20.25    verify_if_often();
   20.26  
   20.27 -  if (PrintCodeCache2) {        // Need to add a new flag
   20.28 -      ResourceMark rm;
   20.29 -      tty->print_cr("CodeCache free:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size());
   20.30 -  }
   20.31 +  print_trace("free", cb);
   20.32    if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
   20.33      _number_of_nmethods_with_dependencies--;
   20.34    }
   20.35 @@ -260,14 +255,148 @@
   20.36    }
   20.37  }
   20.38  
   20.39 -void CodeCache::oops_do(OopClosure* f) {
   20.40 +void CodeCache::blobs_do(CodeBlobClosure* f) {
   20.41    assert_locked_or_safepoint(CodeCache_lock);
   20.42    FOR_ALL_ALIVE_BLOBS(cb) {
   20.43 -    cb->oops_do(f);
   20.44 +    f->do_code_blob(cb);
   20.45 +
   20.46 +#ifdef ASSERT
   20.47 +    if (cb->is_nmethod())
   20.48 +      ((nmethod*)cb)->verify_scavenge_root_oops();
   20.49 +#endif //ASSERT
   20.50    }
   20.51  }
   20.52  
   20.53 +// Walk the list of methods which might contain non-perm oops.
   20.54 +void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
   20.55 +  assert_locked_or_safepoint(CodeCache_lock);
   20.56 +  debug_only(mark_scavenge_root_nmethods());
   20.57 +
   20.58 +  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
   20.59 +    debug_only(cur->clear_scavenge_root_marked());
   20.60 +    assert(cur->scavenge_root_not_marked(), "");
   20.61 +    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
   20.62 +
   20.63 +    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
   20.64 +#ifndef PRODUCT
   20.65 +    if (TraceScavenge) {
   20.66 +      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
   20.67 +    }
   20.68 +#endif //PRODUCT
   20.69 +    if (is_live)
   20.70 +      // Perform cur->oops_do(f), maybe just once per nmethod.
   20.71 +      f->do_code_blob(cur);
   20.72 +  }
   20.73 +
   20.74 +  // Check for stray marks.
   20.75 +  debug_only(verify_perm_nmethods(NULL));
   20.76 +}
   20.77 +
   20.78 +void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   20.79 +  assert_locked_or_safepoint(CodeCache_lock);
   20.80 +  nm->set_on_scavenge_root_list();
   20.81 +  nm->set_scavenge_root_link(_scavenge_root_nmethods);
   20.82 +  set_scavenge_root_nmethods(nm);
   20.83 +  print_trace("add_scavenge_root", nm);
   20.84 +}
   20.85 +
   20.86 +void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   20.87 +  assert_locked_or_safepoint(CodeCache_lock);
   20.88 +  print_trace("drop_scavenge_root", nm);
   20.89 +  nmethod* last = NULL;
   20.90 +  nmethod* cur = scavenge_root_nmethods();
   20.91 +  while (cur != NULL) {
   20.92 +    nmethod* next = cur->scavenge_root_link();
   20.93 +    if (cur == nm) {
   20.94 +      if (last != NULL)
   20.95 +            last->set_scavenge_root_link(next);
   20.96 +      else  set_scavenge_root_nmethods(next);
   20.97 +      nm->set_scavenge_root_link(NULL);
   20.98 +      nm->clear_on_scavenge_root_list();
   20.99 +      return;
  20.100 +    }
  20.101 +    last = cur;
  20.102 +    cur = next;
  20.103 +  }
  20.104 +  assert(false, "should have been on list");
  20.105 +}
  20.106 +
  20.107 +void CodeCache::prune_scavenge_root_nmethods() {
  20.108 +  assert_locked_or_safepoint(CodeCache_lock);
  20.109 +  debug_only(mark_scavenge_root_nmethods());
  20.110 +
  20.111 +  nmethod* last = NULL;
  20.112 +  nmethod* cur = scavenge_root_nmethods();
  20.113 +  while (cur != NULL) {
  20.114 +    nmethod* next = cur->scavenge_root_link();
  20.115 +    debug_only(cur->clear_scavenge_root_marked());
  20.116 +    assert(cur->scavenge_root_not_marked(), "");
  20.117 +    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
  20.118 +
  20.119 +    if (!cur->is_zombie() && !cur->is_unloaded()
  20.120 +        && cur->detect_scavenge_root_oops()) {
  20.121 +      // Keep it.  Advance 'last' to prevent deletion.
  20.122 +      last = cur;
  20.123 +    } else {
  20.124 +      // Prune it from the list, so we don't have to look at it any more.
  20.125 +      print_trace("prune_scavenge_root", cur);
  20.126 +      cur->set_scavenge_root_link(NULL);
  20.127 +      cur->clear_on_scavenge_root_list();
  20.128 +      if (last != NULL)
  20.129 +            last->set_scavenge_root_link(next);
  20.130 +      else  set_scavenge_root_nmethods(next);
  20.131 +    }
  20.132 +    cur = next;
  20.133 +  }
  20.134 +
  20.135 +  // Check for stray marks.
  20.136 +  debug_only(verify_perm_nmethods(NULL));
  20.137 +}
  20.138 +
  20.139 +#ifndef PRODUCT
  20.140 +void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  20.141 +  // While we are here, verify the integrity of the list.
  20.142 +  mark_scavenge_root_nmethods();
  20.143 +  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
  20.144 +    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
  20.145 +    cur->clear_scavenge_root_marked();
  20.146 +  }
  20.147 +  verify_perm_nmethods(f);
  20.148 +}
  20.149 +
  20.150 +// Temporarily mark nmethods that are claimed to be on the non-perm list.
  20.151 +void CodeCache::mark_scavenge_root_nmethods() {
  20.152 +  FOR_ALL_ALIVE_BLOBS(cb) {
  20.153 +    if (cb->is_nmethod()) {
  20.154 +      nmethod *nm = (nmethod*)cb;
  20.155 +      assert(nm->scavenge_root_not_marked(), "clean state");
  20.156 +      if (nm->on_scavenge_root_list())
  20.157 +        nm->set_scavenge_root_marked();
  20.158 +    }
  20.159 +  }
  20.160 +}
  20.161 +
  20.162 +// If the closure is given, run it on the unlisted nmethods.
  20.163 +// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
  20.164 +void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  20.165 +  FOR_ALL_ALIVE_BLOBS(cb) {
  20.166 +    bool call_f = (f_or_null != NULL);
  20.167 +    if (cb->is_nmethod()) {
  20.168 +      nmethod *nm = (nmethod*)cb;
  20.169 +      assert(nm->scavenge_root_not_marked(), "must be already processed");
  20.170 +      if (nm->on_scavenge_root_list())
  20.171 +        call_f = false;  // don't show this one to the client
  20.172 +      nm->verify_scavenge_root_oops();
  20.173 +    } else {
  20.174 +      call_f = false;   // not an nmethod
  20.175 +    }
  20.176 +    if (call_f)  f_or_null->do_code_blob(cb);
  20.177 +  }
  20.178 +}
  20.179 +#endif //PRODUCT
  20.180 +
  20.181  void CodeCache::gc_prologue() {
  20.182 +  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
  20.183  }
  20.184  
  20.185  
  20.186 @@ -285,6 +414,8 @@
  20.187      cb->fix_oop_relocations();
  20.188    }
  20.189    set_needs_cache_clean(false);
  20.190 +  prune_scavenge_root_nmethods();
  20.191 +  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  20.192  }
  20.193  
  20.194  
  20.195 @@ -508,6 +639,14 @@
  20.196    }
  20.197  }
  20.198  
  20.199 +void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  20.200 +  if (PrintCodeCache2) {  // Need to add a new flag
  20.201 +    ResourceMark rm;
  20.202 +    if (size == 0)  size = cb->size();
  20.203 +    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  20.204 +  }
  20.205 +}
  20.206 +
  20.207  void CodeCache::print_internals() {
  20.208    int nmethodCount = 0;
  20.209    int runtimeStubCount = 0;
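
The subtle part of the codeCache.cpp changes above is pruning: at gc_epilogue
a collection may have promoted every young oop an nmethod held, so
prune_scavenge_root_nmethods() unlinks entries in place while it walks. A
standalone restatement of that loop, with simplified stand-in types rather
than the real nmethod API:

    struct NM {                  // simplified stand-in for nmethod
      bool dead;                 // is_zombie() || is_unloaded()
      bool has_young_oops;       // detect_scavenge_root_oops()
      bool on_list;              // on_scavenge_root_list()
      NM*  link;                 // scavenge_root_link()
    };

    NM* prune(NM* head) {
      NM* last = nullptr;
      for (NM* cur = head; cur != nullptr; ) {
        NM* next = cur->link;
        if (!cur->dead && cur->has_young_oops) {
          last = cur;            // keep: advance 'last' so it is not unlinked
        } else {
          cur->link = nullptr;   // prune: clear the entry's list state...
          cur->on_list = false;
          if (last != nullptr) last->link = next;  // ...and splice it out
          else                 head = next;
        }
        cur = next;
      }
      return head;
    }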
    21.1 --- a/src/share/vm/code/codeCache.hpp	Tue Sep 15 11:09:34 2009 -0700
    21.2 +++ b/src/share/vm/code/codeCache.hpp	Tue Sep 15 21:53:47 2009 -0700
    21.3 @@ -45,8 +45,13 @@
    21.4    static int _number_of_blobs;
    21.5    static int _number_of_nmethods_with_dependencies;
    21.6    static bool _needs_cache_clean;
    21.7 +  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
    21.8  
    21.9    static void verify_if_often() PRODUCT_RETURN;
   21.10 +
   21.11 +  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   21.12 +  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
   21.13 +
   21.14   public:
   21.15  
   21.16    // Initialization
   21.17 @@ -61,6 +66,7 @@
   21.18    static void flush();                              // flushes all CodeBlobs
   21.19    static bool contains(void *p);                    // returns whether p is included
   21.20    static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
   21.21 +  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
   21.22    static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
   21.23  
   21.24    // Lookup
   21.25 @@ -106,12 +112,24 @@
   21.26    static void do_unloading(BoolObjectClosure* is_alive,
   21.27                             OopClosure* keep_alive,
   21.28                             bool unloading_occurred);
   21.29 -  static void oops_do(OopClosure* f);
   21.30 +  static void oops_do(OopClosure* f) {
   21.31 +    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
   21.32 +    blobs_do(&oopc);
   21.33 +  }
   21.34 +  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   21.35 +  static void scavenge_root_nmethods_do(CodeBlobClosure* f);
   21.36 +
   21.37 +  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
   21.38 +  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   21.39 +  static void add_scavenge_root_nmethod(nmethod* nm);
   21.40 +  static void drop_scavenge_root_nmethod(nmethod* nm);
   21.41 +  static void prune_scavenge_root_nmethods();
   21.42  
   21.43    // Printing/debugging
   21.44    static void print()   PRODUCT_RETURN;          // prints summary
   21.45    static void print_internals();
   21.46    static void verify();                          // verifies the code cache
   21.47 +  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
   21.48  
   21.49    // The full limits of the codeCache
   21.50    static address  low_bound()                    { return (address) _heap->low_boundary(); }
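
The new inline CodeCache::oops_do above is just an adapter: it wraps the
per-oop closure in a blob-level closure and reuses blobs_do. The same shape in
miniature, with hypothetical Oop/Blob stand-ins rather than the real
OopClosure/CodeBlobClosure hierarchy:

    struct Oop;                                     // opaque stand-in
    struct Blob { Oop** oops; int num_oops; };

    struct OopVisitor  { virtual void do_oop(Oop** p) = 0;  virtual ~OopVisitor() {} };
    struct BlobVisitor { virtual void do_blob(Blob* b) = 0; virtual ~BlobVisitor() {} };

    // cf. CodeBlobToOopClosure: turns an oop visitor into a blob visitor.
    struct BlobToOopAdapter : BlobVisitor {
      OopVisitor* f;
      explicit BlobToOopAdapter(OopVisitor* f) : f(f) {}
      void do_blob(Blob* b) override {
        for (int i = 0; i < b->num_oops; i++) f->do_oop(&b->oops[i]);
      }
    };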
    22.1 --- a/src/share/vm/code/debugInfoRec.cpp	Tue Sep 15 11:09:34 2009 -0700
    22.2 +++ b/src/share/vm/code/debugInfoRec.cpp	Tue Sep 15 21:53:47 2009 -0700
    22.3 @@ -299,7 +299,7 @@
    22.4    stream()->write_int(sender_stream_offset);
    22.5  
    22.6    // serialize scope
    22.7 -  jobject method_enc = (method == NULL)? NULL: method->encoding();
    22.8 +  jobject method_enc = (method == NULL)? NULL: method->constant_encoding();
    22.9    stream()->write_int(oop_recorder()->find_index(method_enc));
   22.10    stream()->write_bci(bci);
   22.11    assert(method == NULL ||
    23.1 --- a/src/share/vm/code/dependencies.cpp	Tue Sep 15 11:09:34 2009 -0700
    23.2 +++ b/src/share/vm/code/dependencies.cpp	Tue Sep 15 21:53:47 2009 -0700
    23.3 @@ -302,7 +302,7 @@
    23.4        bytes.write_byte(code_byte);
    23.5        for (int j = 0; j < stride; j++) {
    23.6          if (j == skipj)  continue;
    23.7 -        bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->encoding()));
    23.8 +        bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->constant_encoding()));
    23.9        }
   23.10      }
   23.11    }
    24.1 --- a/src/share/vm/code/nmethod.cpp	Tue Sep 15 11:09:34 2009 -0700
    24.2 +++ b/src/share/vm/code/nmethod.cpp	Tue Sep 15 21:53:47 2009 -0700
    24.3 @@ -581,10 +581,13 @@
    24.4      debug_only(No_Safepoint_Verifier nsv;)
    24.5      assert_locked_or_safepoint(CodeCache_lock);
    24.6  
    24.7 -    NOT_PRODUCT(_has_debug_info = false; )
    24.8 +    NOT_PRODUCT(_has_debug_info = false);
    24.9 +    _oops_do_mark_link       = NULL;
   24.10      _method                  = method;
   24.11      _entry_bci               = InvocationEntryBci;
   24.12 -    _link                    = NULL;
   24.13 +    _osr_link                = NULL;
   24.14 +    _scavenge_root_link      = NULL;
   24.15 +    _scavenge_root_state     = 0;
   24.16      _compiler                = NULL;
   24.17      // We have no exception handler or deopt handler make the
   24.18      // values something that will never match a pc like the nmethod vtable entry
   24.19 @@ -618,7 +621,7 @@
   24.20      _stack_traversal_mark    = 0;
   24.21  
   24.22      code_buffer->copy_oops_to(this);
   24.23 -    debug_only(check_store();)
   24.24 +    debug_only(verify_scavenge_root_oops());
   24.25      CodeCache::commit(this);
   24.26      VTune::create_nmethod(this);
   24.27    }
   24.28 @@ -668,10 +671,13 @@
   24.29      debug_only(No_Safepoint_Verifier nsv;)
   24.30      assert_locked_or_safepoint(CodeCache_lock);
   24.31  
   24.32 -    NOT_PRODUCT(_has_debug_info = false; )
   24.33 +    NOT_PRODUCT(_has_debug_info = false);
   24.34 +    _oops_do_mark_link       = NULL;
   24.35      _method                  = method;
   24.36      _entry_bci               = InvocationEntryBci;
   24.37 -    _link                    = NULL;
   24.38 +    _osr_link                = NULL;
   24.39 +    _scavenge_root_link      = NULL;
   24.40 +    _scavenge_root_state     = 0;
   24.41      _compiler                = NULL;
   24.42      // We have no exception handler or deopt handler make the
   24.43      // values something that will never match a pc like the nmethod vtable entry
   24.44 @@ -703,7 +709,7 @@
   24.45      _stack_traversal_mark    = 0;
   24.46  
   24.47      code_buffer->copy_oops_to(this);
   24.48 -    debug_only(check_store();)
   24.49 +    debug_only(verify_scavenge_root_oops());
   24.50      CodeCache::commit(this);
   24.51      VTune::create_nmethod(this);
   24.52    }
   24.53 @@ -770,12 +776,15 @@
   24.54      debug_only(No_Safepoint_Verifier nsv;)
   24.55      assert_locked_or_safepoint(CodeCache_lock);
   24.56  
   24.57 -    NOT_PRODUCT(_has_debug_info = false; )
   24.58 +    NOT_PRODUCT(_has_debug_info = false);
   24.59 +    _oops_do_mark_link       = NULL;
   24.60      _method                  = method;
   24.61      _compile_id              = compile_id;
   24.62      _comp_level              = comp_level;
   24.63      _entry_bci               = entry_bci;
   24.64 -    _link                    = NULL;
   24.65 +    _osr_link                = NULL;
   24.66 +    _scavenge_root_link      = NULL;
   24.67 +    _scavenge_root_state     = 0;
   24.68      _compiler                = compiler;
   24.69      _orig_pc_offset          = orig_pc_offset;
   24.70  #ifdef HAVE_DTRACE_H
   24.71 @@ -813,7 +822,10 @@
   24.72      code_buffer->copy_oops_to(this);
   24.73      debug_info->copy_to(this);
   24.74      dependencies->copy_to(this);
   24.75 -    debug_only(check_store();)
   24.76 +    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
   24.77 +      CodeCache::add_scavenge_root_nmethod(this);
   24.78 +    }
   24.79 +    debug_only(verify_scavenge_root_oops());
   24.80  
   24.81      CodeCache::commit(this);
   24.82  
   24.83 @@ -902,23 +914,30 @@
   24.84    if (st != NULL) {
   24.85      ttyLocker ttyl;
   24.86      // Print a little tag line that looks like +PrintCompilation output:
   24.87 -    st->print("%3d%c  %s",
   24.88 +    int tlen = (int) strlen(title);
   24.89 +    bool do_nl = false;
   24.90 +    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
   24.91 +    st->print("%3d%c  %.*s",
   24.92                compile_id(),
   24.93                is_osr_method() ? '%' :
   24.94                method() != NULL &&
   24.95                is_native_method() ? 'n' : ' ',
   24.96 -              title);
   24.97 +              tlen, title);
   24.98  #ifdef TIERED
   24.99      st->print(" (%d) ", comp_level());
  24.100  #endif // TIERED
  24.101      if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
  24.102 -    if (method() != NULL) {
  24.103 -      method()->print_short_name(st);
  24.104 +    if (Universe::heap()->is_gc_active() && method() != NULL) {
  24.105 +      st->print("(method)");
  24.106 +    } else if (method() != NULL) {
  24.107 +      method()->print_short_name(st);
  24.108        if (is_osr_method())
  24.109          st->print(" @ %d", osr_entry_bci());
  24.110        if (method()->code_size() > 0)
  24.111          st->print(" (%d bytes)", method()->code_size());
  24.112      }
  24.113 +
  24.114 +    if (do_nl)  st->cr();
  24.115    }
  24.116  }
  24.117  
  24.118 @@ -1033,6 +1052,7 @@
  24.119    }
  24.120  }
  24.121  
  24.122 +// This is a private interface with the sweeper.
  24.123  void nmethod::mark_as_seen_on_stack() {
  24.124    assert(is_not_entrant(), "must be a non-entrant method");
  24.125    set_stack_traversal_mark(NMethodSweeper::traversal_count());
  24.126 @@ -1077,7 +1097,8 @@
  24.127                    " unloadable], methodOop(" INTPTR_FORMAT
  24.128                    "), cause(" INTPTR_FORMAT ")",
  24.129                    this, (address)_method, (address)cause);
  24.130 -    cause->klass()->print();
  24.131 +    if (!Universe::heap()->is_gc_active())
  24.132 +      cause->klass()->print();
  24.133    }
  24.134    // If _method is already NULL the methodOop is about to be unloaded,
  24.135    // so we don't have to break the cycle. Note that it is possible to
  24.136 @@ -1105,7 +1126,8 @@
  24.137    // The methodOop is gone at this point
  24.138    assert(_method == NULL, "Tautology");
  24.139  
  24.140 -  set_link(NULL);
  24.141 +  set_osr_link(NULL);
  24.142 +  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  24.143    NMethodSweeper::notify(this);
  24.144  }
  24.145  
  24.146 @@ -1291,6 +1313,10 @@
  24.147      ec = next;
  24.148    }
  24.149  
  24.150 +  if (on_scavenge_root_list()) {
  24.151 +    CodeCache::drop_scavenge_root_nmethod(this);
  24.152 +  }
  24.153 +
  24.154    ((CodeBlob*)(this))->flush();
  24.155  
  24.156    CodeCache::free(this);
  24.157 @@ -1350,7 +1376,10 @@
  24.158        return false;
  24.159      }
  24.160    }
  24.161 -  assert(unloading_occurred, "Inconsistency in unloading");
  24.162 +  // If ScavengeRootsInCode is true, an nmethod might be unloaded
  24.163 +  // simply because one of its constant oops has gone dead.
  24.164 +  // No actual classes need to be unloaded in order for this to occur.
  24.165 +  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
  24.166    make_unloaded(is_alive, obj);
  24.167    return true;
  24.168  }
  24.169 @@ -1558,12 +1587,108 @@
  24.170    }
  24.171  
  24.172    // Scopes
  24.173 +  // This includes oop constants not inlined in the code stream.
  24.174    for (oop* p = oops_begin(); p < oops_end(); p++) {
  24.175      if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  24.176      f->do_oop(p);
  24.177    }
  24.178  }
  24.179  
  24.180 +#define NMETHOD_SENTINEL ((nmethod*)badAddress)
  24.181 +
  24.182 +nmethod* volatile nmethod::_oops_do_mark_nmethods;
  24.183 +
  24.184 +// An nmethod is "marked" if its _mark_link is set non-null.
  24.185 +// Even if it is the end of the linked list, it will have a non-null link value,
  24.186 +// as long as it is on the list.
  24.187 +// This code must be MP safe, because it is used from parallel GC passes.
  24.188 +bool nmethod::test_set_oops_do_mark() {
  24.189 +  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  24.190 +  nmethod* observed_mark_link = _oops_do_mark_link;
  24.191 +  if (observed_mark_link == NULL) {
  24.192 +    // Claim this nmethod for this thread to mark.
  24.193 +    observed_mark_link = (nmethod*)
  24.194 +      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
  24.195 +    if (observed_mark_link == NULL) {
  24.196 +
  24.197 +      // Atomically append this nmethod (now claimed) to the head of the list:
  24.198 +      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
  24.199 +      for (;;) {
  24.200 +        nmethod* required_mark_nmethods = observed_mark_nmethods;
  24.201 +        _oops_do_mark_link = required_mark_nmethods;
  24.202 +        observed_mark_nmethods = (nmethod*)
  24.203 +          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
  24.204 +        if (observed_mark_nmethods == required_mark_nmethods)
  24.205 +          break;
  24.206 +      }
  24.207 +      // Mark was clear when we first saw this guy.
  24.208 +      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark\n"));
  24.209 +      return false;
  24.210 +    }
  24.211 +  }
  24.212 +  // On fall through, another racing thread marked this nmethod before we did.
  24.213 +  return true;
  24.214 +}
  24.215 +
  24.216 +void nmethod::oops_do_marking_prologue() {
  24.217 +  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
  24.218 +  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  24.219 +  // We use cmpxchg_ptr instead of regular assignment here because the user
  24.220 +  // may fork a bunch of threads, and we need them all to see the same state.
  24.221 +  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
  24.222 +  guarantee(observed == NULL, "no races in this sequential code");
  24.223 +}
  24.224 +
  24.225 +void nmethod::oops_do_marking_epilogue() {
  24.226 +  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
  24.227 +  nmethod* cur = _oops_do_mark_nmethods;
  24.228 +  while (cur != NMETHOD_SENTINEL) {
  24.229 +    assert(cur != NULL, "not NULL-terminated");
  24.230 +    nmethod* next = cur->_oops_do_mark_link;
  24.231 +    cur->_oops_do_mark_link = NULL;
  24.232 +    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark\n"));
  24.233 +    cur = next;
  24.234 +  }
  24.235 +  void* required = _oops_do_mark_nmethods;
  24.236 +  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
  24.237 +  guarantee(observed == required, "no races in this sequential code");
  24.238 +  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
  24.239 +}
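
Taken together, test_set_oops_do_mark and the prologue/epilogue pair implement a sentinel-terminated, lock-free stack of claimed nmethods: each GC worker CASes the per-nmethod link to claim it, then CASes the claimed nmethod onto the global list head, and the epilogue drains the list sequentially and clears the marks. A self-contained analogue using std::atomic (toy types only; the real code uses Atomic::cmpxchg_ptr and an NMETHOD_SENTINEL built from badAddress):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct NM {                             // toy nmethod: only the mark link matters here
      std::atomic<NM*> mark_link{nullptr};  // non-null <=> "marked" (claimed or listed)
    };

    static NM sentinel;                              // cf. NMETHOD_SENTINEL
    static std::atomic<NM*> mark_list{nullptr};      // cf. _oops_do_mark_nmethods

    void prologue() { mark_list.store(&sentinel); }  // non-null head == marking active

    // Returns true iff another thread claimed nm first (cf. test_set_oops_do_mark).
    bool test_set_mark(NM* nm) {
      NM* expected = nullptr;
      if (!nm->mark_link.compare_exchange_strong(expected, &sentinel))
        return true;                        // lost the race; nm already marked
      // We own nm: splice it onto the global list (the link doubles as "next").
      NM* head = mark_list.load();
      do { nm->mark_link.store(head); }
      while (!mark_list.compare_exchange_weak(head, nm));
      return false;
    }

    void epilogue() {                       // sequential: walk, unmark, detach the list
      NM* cur = mark_list.exchange(nullptr);
      int n = 0;
      while (cur != &sentinel) {
        NM* next = cur->mark_link.load();
        cur->mark_link.store(nullptr);      // unmarked again for the next GC
        cur = next; ++n;
      }
      std::printf("claimed %d methods exactly once\n", n);
    }

    int main() {
      std::vector<NM> methods(1000);
      prologue();
      std::vector<std::thread> workers;
      for (int t = 0; t < 4; ++t)           // four "GC workers" race on every method
        workers.emplace_back([&] { for (NM& nm : methods) test_set_mark(&nm); });
      for (auto& w : workers) w.join();
      epilogue();                           // prints 1000
    }
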
  24.240 +
  24.241 +class DetectScavengeRoot: public OopClosure {
  24.242 +  bool     _detected_scavenge_root;
  24.243 +public:
  24.244 +  DetectScavengeRoot() : _detected_scavenge_root(false)
  24.245 +  { NOT_PRODUCT(_print_nm = NULL); }
  24.246 +  bool detected_scavenge_root() { return _detected_scavenge_root; }
  24.247 +  virtual void do_oop(oop* p) {
  24.248 +    if ((*p) != NULL && (*p)->is_scavengable()) {
  24.249 +      NOT_PRODUCT(maybe_print(p));
  24.250 +      _detected_scavenge_root = true;
  24.251 +    }
  24.252 +  }
  24.253 +  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  24.254 +
  24.255 +#ifndef PRODUCT
  24.256 +  nmethod* _print_nm;
  24.257 +  void maybe_print(oop* p) {
  24.258 +    if (_print_nm == NULL)  return;
  24.259 +    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
  24.260 +    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
  24.261 +                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
  24.262 +                  (intptr_t)(*p), (intptr_t)p);
  24.263 +    (*p)->print();
  24.264 +  }
  24.265 +#endif //PRODUCT
  24.266 +};
  24.267 +
  24.268 +bool nmethod::detect_scavenge_root_oops() {
  24.269 +  DetectScavengeRoot detect_scavenge_root;
  24.270 +  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
  24.271 +  oops_do(&detect_scavenge_root);
  24.272 +  return detect_scavenge_root.detected_scavenge_root();
  24.273 +}
  24.274 +
  24.275  // Method that knows how to preserve outgoing arguments at call. This method must be
  24.276  // called with a frame corresponding to a Java invoke
  24.277  void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  24.278 @@ -1878,6 +2003,24 @@
  24.279  // -----------------------------------------------------------------------------
  24.280  // Verification
  24.281  
  24.282 +class VerifyOopsClosure: public OopClosure {
  24.283 +  nmethod* _nm;
  24.284 +  bool     _ok;
  24.285 +public:
  24.286 +  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
  24.287 +  bool ok() { return _ok; }
  24.288 +  virtual void do_oop(oop* p) {
  24.289 +    if ((*p) == NULL || (*p)->is_oop())  return;
  24.290 +    if (_ok) {
  24.291 +      _nm->print_nmethod(true);
  24.292 +      _ok = false;
  24.293 +    }
  24.294 +    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
  24.295 +                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  24.296 +  }
  24.297 +  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  24.298 +};
  24.299 +
  24.300  void nmethod::verify() {
  24.301  
  24.302    // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
  24.303 @@ -1911,6 +2054,11 @@
  24.304      }
  24.305    }
  24.306  
  24.307 +  VerifyOopsClosure voc(this);
  24.308 +  oops_do(&voc);
  24.309 +  assert(voc.ok(), "embedded oops must be OK");
  24.310 +  verify_scavenge_root_oops();
  24.311 +
  24.312    verify_scopes();
  24.313  }
  24.314  
  24.315 @@ -1974,19 +2122,34 @@
  24.316  // Non-product code
  24.317  #ifndef PRODUCT
  24.318  
  24.319 -void nmethod::check_store() {
  24.320 -  // Make sure all oops in the compiled code are tenured
  24.321 +class DebugScavengeRoot: public OopClosure {
  24.322 +  nmethod* _nm;
  24.323 +  bool     _ok;
  24.324 +public:
  24.325 +  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
  24.326 +  bool ok() { return _ok; }
  24.327 +  virtual void do_oop(oop* p) {
  24.328 +    if ((*p) == NULL || !(*p)->is_scavengable())  return;
  24.329 +    if (_ok) {
  24.330 +      _nm->print_nmethod(true);
  24.331 +      _ok = false;
  24.332 +    }
  24.333 +    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
  24.334 +                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  24.335 +    (*p)->print();
  24.336 +  }
  24.337 +  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  24.338 +};
  24.339  
  24.340 -  RelocIterator iter(this);
  24.341 -  while (iter.next()) {
  24.342 -    if (iter.type() == relocInfo::oop_type) {
  24.343 -      oop_Relocation* reloc = iter.oop_reloc();
  24.344 -      oop obj = reloc->oop_value();
  24.345 -      if (obj != NULL && !obj->is_perm()) {
  24.346 -        fatal("must be permanent oop in compiled code");
  24.347 -      }
  24.348 -    }
  24.349 +void nmethod::verify_scavenge_root_oops() {
  24.350 +  if (!on_scavenge_root_list()) {
  24.351 +    // Actually look inside, to verify the claim that it's clean.
  24.352 +    DebugScavengeRoot debug_scavenge_root(this);
  24.353 +    oops_do(&debug_scavenge_root);
  24.354 +    if (!debug_scavenge_root.ok())
  24.355 +      fatal("found an unadvertised bad non-perm oop in the code cache");
  24.356    }
  24.357 +  assert(scavenge_root_not_marked(), "");
  24.358  }
  24.359  
  24.360  #endif // PRODUCT
  24.361 @@ -2019,6 +2182,7 @@
  24.362      if (is_not_entrant()) tty->print("not_entrant ");
  24.363      if (is_zombie())      tty->print("zombie ");
  24.364      if (is_unloaded())    tty->print("unloaded ");
  24.365 +    if (on_scavenge_root_list())  tty->print("scavenge_root ");
  24.366      tty->print_cr("}:");
  24.367    }
  24.368    if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
    25.1 --- a/src/share/vm/code/nmethod.hpp	Tue Sep 15 11:09:34 2009 -0700
    25.2 +++ b/src/share/vm/code/nmethod.hpp	Tue Sep 15 21:53:47 2009 -0700
    25.3 @@ -125,6 +125,7 @@
    25.4  class nmethod : public CodeBlob {
    25.5    friend class VMStructs;
    25.6    friend class NMethodSweeper;
    25.7 +  friend class CodeCache;  // non-perm oops
    25.8   private:
    25.9    // Shared fields for all nmethod's
   25.10    static int _zombie_instruction_size;
   25.11 @@ -132,7 +133,12 @@
   25.12    methodOop _method;
   25.13    int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
   25.14  
   25.15 -  nmethod*  _link;             // To support simple linked-list chaining of nmethods
   25.16 +  // To support simple linked-list chaining of nmethods:
   25.17 +  nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
   25.18 +  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
   25.19 +
   25.20 +  static nmethod* volatile _oops_do_mark_nmethods;
   25.21 +  nmethod*        volatile _oops_do_mark_link;
   25.22  
   25.23    AbstractCompiler* _compiler; // The compiler which compiled this nmethod
   25.24  
   25.25 @@ -174,6 +180,8 @@
   25.26    // used by jvmti to track if an unload event has been posted for this nmethod.
   25.27    bool _unload_reported;
   25.28  
   25.29 +  jbyte _scavenge_root_state;
   25.30 +
   25.31    NOT_PRODUCT(bool _has_debug_info; )
   25.32  
   25.33    // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
   25.34 @@ -242,7 +250,6 @@
   25.35  
   25.36    // helper methods
   25.37    void* operator new(size_t size, int nmethod_size);
   25.38 -  void check_store();
   25.39  
   25.40    const char* reloc_string_for(u_char* begin, u_char* end);
   25.41    void make_not_entrant_or_zombie(int state);
   25.42 @@ -407,6 +414,24 @@
   25.43    int   version() const                           { return flags.version; }
   25.44    void  set_version(int v);
   25.45  
   25.46 +  // Non-perm oop support
   25.47 +  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
   25.48 + protected:
   25.49 +  enum { npl_on_list = 0x01, npl_marked = 0x10 };
   25.50 +  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
   25.51 +  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
   25.52 +  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
   25.53 +#ifndef PRODUCT
   25.54 +  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
   25.55 +  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
   25.56 +  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
   25.57 +  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
   25.58 +#endif //PRODUCT
   25.59 +  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   25.60 +  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
   25.61 +
   25.62 + public:
   25.63 +
   25.64    // Sweeper support
   25.65    long  stack_traversal_mark()                    { return _stack_traversal_mark; }
   25.66    void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
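
The _scavenge_root_state byte above packs two independent flags: npl_on_list (0x01) records durable membership on CodeCache::scavenge_root_nmethods, while npl_marked (0x10) is debug-only scratch used by the pruning and verification passes; note that scavenge_root_not_marked() deliberately masks off the on-list bit. A small sketch of the legal transitions (values mirror the enum above; illustration only):

    #include <cassert>

    enum { npl_on_list = 0x01, npl_marked = 0x10 };

    int main() {
      unsigned char state = 0;                // fresh nmethod: off-list, unmarked
      state = npl_on_list;                    // set_on_scavenge_root_list()
      assert((state & ~npl_on_list) == 0);    // scavenge_root_not_marked(): on-list bit ignored
      state |= npl_marked;                    // set_scavenge_root_marked(), debug pruning only
      assert((state & ~npl_on_list) != 0);    // now "marked"; the verify assert would trip here
      state &= ~npl_marked;                   // clear_scavenge_root_marked()
      state = 0;                              // clear_on_scavenge_root_list()
      assert((state & npl_on_list) == 0);     // on_scavenge_root_list() is false again
    }
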
   25.67 @@ -425,8 +450,8 @@
   25.68    int   osr_entry_bci() const                     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
   25.69    address  osr_entry() const                      { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
   25.70    void  invalidate_osr_method();
   25.71 -  nmethod* link() const                           { return _link; }
   25.72 -  void     set_link(nmethod *n)                   { _link = n; }
   25.73 +  nmethod* osr_link() const                       { return _osr_link; }
   25.74 +  void     set_osr_link(nmethod *n)               { _osr_link = n; }
   25.75  
   25.76    // tells whether frames described by this nmethod can be deoptimized
   25.77    // note: native wrappers cannot be deoptimized.
   25.78 @@ -467,6 +492,14 @@
   25.79    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
   25.80                                       OopClosure* f);
   25.81    void oops_do(OopClosure* f);
   25.82 +  bool detect_scavenge_root_oops();
   25.83 +  void verify_scavenge_root_oops() PRODUCT_RETURN;
   25.84 +
   25.85 +  bool test_set_oops_do_mark();
   25.86 +  static void oops_do_marking_prologue();
   25.87 +  static void oops_do_marking_epilogue();
   25.88 +  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
   25.89 +  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
   25.90  
   25.91    // ScopeDesc for an instruction
   25.92    ScopeDesc* scope_desc_at(address pc);
    26.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Sep 15 11:09:34 2009 -0700
    26.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Sep 15 21:53:47 2009 -0700
    26.3 @@ -53,14 +53,12 @@
    26.4   public:
    26.5    MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
    26.6                        bool should_do_nmethods);
    26.7 +  bool should_do_nmethods() { return _should_do_nmethods; }
    26.8    virtual void do_oop(oop* p);
    26.9    virtual void do_oop(narrowOop* p);
   26.10    inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
   26.11    inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
   26.12    bool do_header() { return true; }
   26.13 -  virtual const bool do_nmethods() const {
   26.14 -    return _should_do_nmethods;
   26.15 -  }
   26.16    Prefetch::style prefetch_style() {
   26.17      return Prefetch::do_read;
   26.18    }
   26.19 @@ -79,14 +77,12 @@
   26.20   public:
   26.21    MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
   26.22                              CMSBitMap* cms_bm, bool should_do_nmethods);
   26.23 +  bool should_do_nmethods() { return _should_do_nmethods; }
   26.24    virtual void do_oop(oop* p);
   26.25    virtual void do_oop(narrowOop* p);
   26.26    inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   26.27    inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   26.28    bool do_header() { return true; }
   26.29 -  virtual const bool do_nmethods() const {
   26.30 -    return _should_do_nmethods;
   26.31 -  }
   26.32    Prefetch::style prefetch_style() {
   26.33      return Prefetch::do_read;
   26.34    }
   26.35 @@ -194,7 +190,6 @@
   26.36    inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   26.37    inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   26.38    bool do_header() { return true; }
   26.39 -  virtual const bool do_nmethods() const { return true; }
   26.40    Prefetch::style prefetch_style() {
   26.41      return Prefetch::do_read;
   26.42    }
    27.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
    27.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
    27.3 @@ -2857,9 +2857,12 @@
    27.4  
    27.5    gch->gen_process_strong_roots(_cmsGen->level(),
    27.6                                  true,   // younger gens are roots
    27.7 +                                true,   // activate StrongRootsScope
    27.8                                  true,   // collecting perm gen
    27.9                                  SharedHeap::ScanningOption(roots_scanning_options()),
   27.10 -                                NULL, &notOlder);
   27.11 +                                &notOlder,
   27.12 +                                true,   // walk code active on stacks
   27.13 +                                NULL);
   27.14  
   27.15    // Now mark from the roots
   27.16    assert(_revisitStack.isEmpty(), "Should be empty");
   27.17 @@ -2905,9 +2908,12 @@
   27.18    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   27.19    gch->gen_process_strong_roots(_cmsGen->level(),
   27.20                                  true,   // younger gens are roots
   27.21 +                                true,   // activate StrongRootsScope
   27.22                                  true,   // collecting perm gen
   27.23                                  SharedHeap::ScanningOption(roots_scanning_options()),
   27.24 -                                NULL, &notOlder);
   27.25 +                                &notOlder,
   27.26 +                                true,   // walk code active on stacks
   27.27 +                                NULL);
   27.28  
   27.29    // Now mark from the roots
   27.30    assert(_revisitStack.isEmpty(), "Should be empty");
   27.31 @@ -3503,9 +3509,12 @@
   27.32      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   27.33      gch->gen_process_strong_roots(_cmsGen->level(),
   27.34                                    true,   // younger gens are roots
   27.35 +                                  true,   // activate StrongRootsScope
   27.36                                    true,   // collecting perm gen
   27.37                                    SharedHeap::ScanningOption(roots_scanning_options()),
   27.38 -                                  NULL, &notOlder);
   27.39 +                                  &notOlder,
   27.40 +                                  true,   // walk all of code cache if (so & SO_CodeCache)
   27.41 +                                  NULL);
   27.42    }
   27.43  
   27.44    // Clear mod-union table; it will be dirtied in the prologue of
   27.45 @@ -5015,9 +5024,15 @@
   27.46    _timer.start();
   27.47    gch->gen_process_strong_roots(_collector->_cmsGen->level(),
   27.48                                  false,     // yg was scanned above
   27.49 +                                false,     // this is parallel code
   27.50                                  true,      // collecting perm gen
   27.51                                  SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
   27.52 -                                NULL, &par_mrias_cl);
   27.53 +                                &par_mrias_cl,
   27.54 +                                true,   // walk all of code cache if (so & SO_CodeCache)
   27.55 +                                NULL);
   27.56 +  assert(_collector->should_unload_classes()
   27.57 +         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
   27.58 +         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   27.59    _timer.stop();
   27.60    if (PrintCMSStatistics != 0) {
   27.61      gclog_or_tty->print_cr(
   27.62 @@ -5398,7 +5413,6 @@
   27.63  
   27.64    // Set up for parallel process_strong_roots work.
   27.65    gch->set_par_threads(n_workers);
   27.66 -  gch->change_strong_roots_parity();
   27.67    // We won't be iterating over the cards in the card table updating
   27.68    // the younger_gen cards, so we shouldn't call the following else
   27.69    // the verification code as well as subsequent younger_refs_iterate
   27.70 @@ -5429,8 +5443,10 @@
   27.71    if (n_workers > 1) {
   27.72      // Make refs discovery MT-safe
   27.73      ReferenceProcessorMTMutator mt(ref_processor(), true);
   27.74 +    GenCollectedHeap::StrongRootsScope srs(gch);
   27.75      workers->run_task(&tsk);
   27.76    } else {
   27.77 +    GenCollectedHeap::StrongRootsScope srs(gch);
   27.78      tsk.work(0);
   27.79    }
   27.80    gch->set_par_threads(0);  // 0 ==> non-parallel.
   27.81 @@ -5514,11 +5530,18 @@
   27.82      verify_work_stacks_empty();
   27.83  
   27.84      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
   27.85 +    GenCollectedHeap::StrongRootsScope srs(gch);
   27.86      gch->gen_process_strong_roots(_cmsGen->level(),
   27.87                                    true,  // younger gens as roots
   27.88 +                                  false, // use the local StrongRootsScope
   27.89                                    true,  // collecting perm gen
   27.90                                    SharedHeap::ScanningOption(roots_scanning_options()),
   27.91 -                                  NULL, &mrias_cl);
   27.92 +                                  &mrias_cl,
   27.93 +                                  true,   // walk code active on stacks
   27.94 +                                  NULL);
   27.95 +    assert(should_unload_classes()
   27.96 +           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
   27.97 +           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   27.98    }
   27.99    verify_work_stacks_empty();
  27.100    // Restore evacuated mark words, if any, used for overflow list links
    28.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 15 11:09:34 2009 -0700
    28.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 15 21:53:47 2009 -0700
    28.3 @@ -747,10 +747,11 @@
    28.4    // clear the mark bitmap (no grey objects to start with)
    28.5    _nextMarkBitMap->clearAll();
    28.6    PrintReachableClosure prcl(_nextMarkBitMap);
    28.7 -  g1h->process_strong_roots(
    28.8 +  g1h->process_strong_roots(true,    // activate StrongRootsScope
    28.9                              false,   // fake perm gen collection
   28.10                              SharedHeap::SO_AllClasses,
   28.11                              &prcl, // Regular roots
   28.12 +                            NULL,  // do not visit active blobs
   28.13                              &prcl    // Perm Gen Roots
   28.14                              );
   28.15    // The root iteration above "consumed" dirty cards in the perm gen.
   28.16 @@ -866,9 +867,11 @@
   28.17    g1h->set_marking_started();
   28.18    g1h->rem_set()->prepare_for_younger_refs_iterate(false);
   28.19  
   28.20 -  g1h->process_strong_roots(false,   // fake perm gen collection
   28.21 +  g1h->process_strong_roots(true,    // activate StrongRootsScope
   28.22 +                            false,   // fake perm gen collection
   28.23                              SharedHeap::SO_AllClasses,
   28.24                              &notOlder, // Regular roots
   28.25 +                            NULL,     // do not visit active blobs
   28.26                              &older    // Perm Gen Roots
   28.27                              );
   28.28    checkpointRootsInitialPost();
   28.29 @@ -1963,7 +1966,7 @@
   28.30    g1h->ensure_parsability(false);
   28.31  
   28.32    if (ParallelGCThreads > 0) {
   28.33 -    g1h->change_strong_roots_parity();
   28.34 +    G1CollectedHeap::StrongRootsScope srs(g1h);
   28.35      // this is remark, so we'll use up all available threads
   28.36      int active_workers = ParallelGCThreads;
   28.37      set_phase(active_workers, false);
   28.38 @@ -1980,7 +1983,7 @@
   28.39      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   28.40      guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
   28.41    } else {
   28.42 -    g1h->change_strong_roots_parity();
   28.43 +    G1CollectedHeap::StrongRootsScope srs(g1h);
   28.44      // this is remark, so we'll use up all available threads
   28.45      int active_workers = 1;
   28.46      set_phase(active_workers, false);
    29.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
    29.3 @@ -2299,9 +2299,12 @@
    29.4    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    29.5      if (!silent) { gclog_or_tty->print("roots "); }
    29.6      VerifyRootsClosure rootsCl(use_prev_marking);
    29.7 -    process_strong_roots(false,
    29.8 +    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
    29.9 +    process_strong_roots(true,  // activate StrongRootsScope
   29.10 +                         false,
   29.11                           SharedHeap::SO_AllClasses,
   29.12                           &rootsCl,
   29.13 +                         &blobsCl,
   29.14                           &rootsCl);
   29.15      rem_set()->invalidate(perm_gen()->used_region(), false);
   29.16      if (!silent) { gclog_or_tty->print("heapRegions "); }
   29.17 @@ -3992,8 +3995,14 @@
   29.18    BufferingOopsInGenClosure buf_scan_perm(scan_perm);
   29.19    buf_scan_perm.set_generation(perm_gen());
   29.20  
   29.21 -  process_strong_roots(collecting_perm_gen, so,
   29.22 +  // Walk the code cache w/o buffering, because StarTask cannot handle
   29.23 +  // unaligned oop locations.
   29.24 +  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
   29.25 +
   29.26 +  process_strong_roots(false, // no scoping; this is parallel code
   29.27 +                       collecting_perm_gen, so,
   29.28                         &buf_scan_non_heap_roots,
   29.29 +                       &eager_scan_code_roots,
   29.30                         &buf_scan_perm);
   29.31    // Finish up any enqueued closure apps.
   29.32    buf_scan_non_heap_roots.done();
   29.33 @@ -4083,7 +4092,8 @@
   29.34  void
   29.35  G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
   29.36                                         OopClosure* non_root_closure) {
   29.37 -  SharedHeap::process_weak_roots(root_closure, non_root_closure);
   29.38 +  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
   29.39 +  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
   29.40  }
   29.41  
   29.42  
   29.43 @@ -4117,15 +4127,16 @@
   29.44  
   29.45    init_for_evac_failure(NULL);
   29.46  
   29.47 -  change_strong_roots_parity();  // In preparation for parallel strong roots.
   29.48    rem_set()->prepare_for_younger_refs_iterate(true);
   29.49  
   29.50    assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   29.51    double start_par = os::elapsedTime();
   29.52    if (ParallelGCThreads > 0) {
   29.53      // The individual threads will set their evac-failure closures.
   29.54 +    StrongRootsScope srs(this);
   29.55      workers()->run_task(&g1_par_task);
   29.56    } else {
   29.57 +    StrongRootsScope srs(this);
   29.58      g1_par_task.work(0);
   29.59    }
   29.60  
    30.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
    30.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
    30.3 @@ -116,9 +116,11 @@
    30.4  
    30.5    SharedHeap* sh = SharedHeap::heap();
    30.6  
    30.7 -  sh->process_strong_roots(true,  // Collecting permanent generation.
    30.8 +  sh->process_strong_roots(true,  // activate StrongRootsScope
    30.9 +                           true,  // Collecting permanent generation.
   30.10                             SharedHeap::SO_SystemClasses,
   30.11                             &GenMarkSweep::follow_root_closure,
   30.12 +                           &GenMarkSweep::follow_code_root_closure,
   30.13                             &GenMarkSweep::follow_root_closure);
   30.14  
   30.15    // Process reference objects found during marking
   30.16 @@ -276,9 +278,11 @@
   30.17  
   30.18    SharedHeap* sh = SharedHeap::heap();
   30.19  
   30.20 -  sh->process_strong_roots(true,  // Collecting permanent generation.
   30.21 +  sh->process_strong_roots(true,  // activate StrongRootsScope
   30.22 +                           true,  // Collecting permanent generation.
   30.23                             SharedHeap::SO_AllClasses,
   30.24                             &GenMarkSweep::adjust_root_pointer_closure,
   30.25 +                           NULL,  // do not touch code cache here
   30.26                             &GenMarkSweep::adjust_pointer_closure);
   30.27  
   30.28    g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
    31.1 --- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue Sep 15 11:09:34 2009 -0700
    31.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue Sep 15 21:53:47 2009 -0700
    31.3 @@ -372,6 +372,7 @@
    31.4  psScavenge.inline.hpp                   psPromotionManager.hpp
    31.5  psScavenge.inline.hpp                   psScavenge.hpp
    31.6  
    31.7 +pcTasks.cpp                             codeCache.hpp
    31.8  pcTasks.cpp                             collectedHeap.hpp
    31.9  pcTasks.cpp                             fprofiler.hpp
   31.10  pcTasks.cpp                             jniHandles.hpp
   31.11 @@ -391,6 +392,7 @@
   31.12  pcTasks.hpp				psTasks.hpp
   31.13  
   31.14  psTasks.cpp                             cardTableExtension.hpp
   31.15 +psTasks.cpp                             codeCache.hpp
   31.16  psTasks.cpp                             fprofiler.hpp
   31.17  psTasks.cpp                             gcTaskManager.hpp
   31.18  psTasks.cpp                             iterator.hpp
    32.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
    32.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
    32.3 @@ -480,12 +480,14 @@
    32.4  
    32.5    par_scan_state.start_strong_roots();
    32.6    gch->gen_process_strong_roots(_gen->level(),
    32.7 -                                true, // Process younger gens, if any,
    32.8 -                                      // as strong roots.
    32.9 -                                false,// not collecting perm generation.
   32.10 +                                true,  // Process younger gens, if any,
   32.11 +                                       // as strong roots.
   32.12 +                                false, // no scope; this is parallel code
   32.13 +                                false, // not collecting perm generation.
   32.14                                  SharedHeap::SO_AllClasses,
   32.15 -                                &par_scan_state.older_gen_closure(),
   32.16 -                                &par_scan_state.to_space_root_closure());
   32.17 +                                &par_scan_state.to_space_root_closure(),
   32.18 +                                true,   // walk *all* scavengable nmethods
   32.19 +                                &par_scan_state.older_gen_closure());
   32.20    par_scan_state.end_strong_roots();
   32.21  
   32.22    // "evacuate followers".
   32.23 @@ -799,15 +801,16 @@
   32.24    ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
   32.25    int n_workers = workers->total_workers();
   32.26    gch->set_par_threads(n_workers);
   32.27 -  gch->change_strong_roots_parity();
   32.28    gch->rem_set()->prepare_for_younger_refs_iterate(true);
   32.29    // It turns out that even when we're using 1 thread, doing the work in a
   32.30    // separate thread causes wide variance in run times.  We can't help this
   32.31    // in the multi-threaded case, but we special-case n=1 here to get
   32.32    // repeatable measurements of the 1-thread overhead of the parallel code.
   32.33    if (n_workers > 1) {
   32.34 +    GenCollectedHeap::StrongRootsScope srs(gch);
   32.35      workers->run_task(&tsk);
   32.36    } else {
   32.37 +    GenCollectedHeap::StrongRootsScope srs(gch);
   32.38      tsk.work(0);
   32.39    }
   32.40    thread_state_set.reset();
    33.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
    33.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
    33.3 @@ -962,6 +962,14 @@
    33.4    _old_gen->resize(desired_free_space);
    33.5  }
    33.6  
    33.7 +ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
    33.8 +  // nothing particular
    33.9 +}
   33.10 +
   33.11 +ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
   33.12 +  // nothing particular
   33.13 +}
   33.14 +
   33.15  #ifndef PRODUCT
   33.16  void ParallelScavengeHeap::record_gen_tops_before_GC() {
   33.17    if (ZapUnusedHeapArea) {
    34.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
    34.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
    34.3 @@ -234,6 +234,13 @@
    34.4  
    34.5    // Mangle the unused parts of all spaces in the heap
    34.6    void gen_mangle_unused_area() PRODUCT_RETURN;
    34.7 +
    34.8 +  // Call these in sequential code around the processing of strong roots.
    34.9 +  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   34.10 +  public:
   34.11 +    ParStrongRootsScope();
   34.12 +    ~ParStrongRootsScope();
   34.13 +  };
   34.14  };
   34.15  
   34.16  inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
    35.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 15 11:09:34 2009 -0700
    35.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 15 21:53:47 2009 -0700
    35.3 @@ -39,12 +39,13 @@
    35.4    ParCompactionManager* cm =
    35.5      ParCompactionManager::gc_thread_compaction_manager(which);
    35.6    PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
    35.7 +  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
    35.8  
    35.9    if (_java_thread != NULL)
   35.10 -    _java_thread->oops_do(&mark_and_push_closure);
   35.11 +    _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
   35.12  
   35.13    if (_vm_thread != NULL)
   35.14 -    _vm_thread->oops_do(&mark_and_push_closure);
   35.15 +    _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
   35.16  
   35.17    // Do the real work
   35.18    cm->drain_marking_stacks(&mark_and_push_closure);
   35.19 @@ -79,7 +80,8 @@
   35.20      case threads:
   35.21      {
   35.22        ResourceMark rm;
   35.23 -      Threads::oops_do(&mark_and_push_closure);
   35.24 +      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
   35.25 +      Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
   35.26      }
   35.27      break;
   35.28  
   35.29 @@ -107,6 +109,11 @@
   35.30        vmSymbols::oops_do(&mark_and_push_closure);
   35.31        break;
   35.32  
   35.33 +    case code_cache:
   35.34 +      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
   35.35 +      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
   35.36 +      break;
   35.37 +
   35.38      default:
   35.39        fatal("Unknown root type");
   35.40    }
    36.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 15 11:09:34 2009 -0700
    36.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 15 21:53:47 2009 -0700
    36.3 @@ -92,7 +92,8 @@
    36.4      jvmti                 = 7,
    36.5      system_dictionary     = 8,
    36.6      vm_symbols            = 9,
    36.7 -    reference_processing  = 10
    36.8 +    reference_processing  = 10,
    36.9 +    code_cache            = 11
   36.10    };
   36.11   private:
   36.12    RootType _root_type;
    37.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
    37.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
    37.3 @@ -507,16 +507,22 @@
    37.4    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    37.5  
    37.6    // General strong roots.
    37.7 -  Universe::oops_do(mark_and_push_closure());
    37.8 -  ReferenceProcessor::oops_do(mark_and_push_closure());
    37.9 -  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
   37.10 -  Threads::oops_do(mark_and_push_closure());
   37.11 -  ObjectSynchronizer::oops_do(mark_and_push_closure());
   37.12 -  FlatProfiler::oops_do(mark_and_push_closure());
   37.13 -  Management::oops_do(mark_and_push_closure());
   37.14 -  JvmtiExport::oops_do(mark_and_push_closure());
   37.15 -  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
   37.16 -  vmSymbols::oops_do(mark_and_push_closure());
   37.17 +  {
   37.18 +    ParallelScavengeHeap::ParStrongRootsScope psrs;
   37.19 +    Universe::oops_do(mark_and_push_closure());
   37.20 +    ReferenceProcessor::oops_do(mark_and_push_closure());
   37.21 +    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
   37.22 +    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
   37.23 +    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
   37.24 +    ObjectSynchronizer::oops_do(mark_and_push_closure());
   37.25 +    FlatProfiler::oops_do(mark_and_push_closure());
   37.26 +    Management::oops_do(mark_and_push_closure());
   37.27 +    JvmtiExport::oops_do(mark_and_push_closure());
   37.28 +    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
   37.29 +    vmSymbols::oops_do(mark_and_push_closure());
   37.30 +    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
   37.31 +    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
   37.32 +  }
   37.33  
   37.34    // Flush marking stack.
   37.35    follow_stack();
   37.36 @@ -609,7 +615,7 @@
   37.37    Universe::oops_do(adjust_root_pointer_closure());
   37.38    ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   37.39    JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   37.40 -  Threads::oops_do(adjust_root_pointer_closure());
   37.41 +  Threads::oops_do(adjust_root_pointer_closure(), NULL);
   37.42    ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
   37.43    FlatProfiler::oops_do(adjust_root_pointer_closure());
   37.44    Management::oops_do(adjust_root_pointer_closure());
   37.45 @@ -617,6 +623,7 @@
   37.46    // SO_AllClasses
   37.47    SystemDictionary::oops_do(adjust_root_pointer_closure());
   37.48    vmSymbols::oops_do(adjust_root_pointer_closure());
   37.49 +  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
   37.50  
   37.51    // Now adjust pointers in remaining weak roots.  (All of which should
   37.52    // have been cleared if they pointed to non-surviving objects.)
    38.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 15 11:09:34 2009 -0700
    38.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 15 21:53:47 2009 -0700
    38.3 @@ -2322,6 +2322,7 @@
    38.4  
    38.5    {
    38.6      TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
    38.7 +    ParallelScavengeHeap::ParStrongRootsScope psrs;
    38.8  
    38.9      GCTaskQueue* q = GCTaskQueue::create();
   38.10  
   38.11 @@ -2335,6 +2336,7 @@
   38.12      q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
   38.13      q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
   38.14      q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
   38.15 +    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
   38.16  
   38.17      if (parallel_gc_threads > 1) {
   38.18        for (uint j = 0; j < parallel_gc_threads; j++) {
   38.19 @@ -2405,7 +2407,7 @@
   38.20    Universe::oops_do(adjust_root_pointer_closure());
   38.21    ReferenceProcessor::oops_do(adjust_root_pointer_closure());
   38.22    JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
   38.23 -  Threads::oops_do(adjust_root_pointer_closure());
   38.24 +  Threads::oops_do(adjust_root_pointer_closure(), NULL);
   38.25    ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
   38.26    FlatProfiler::oops_do(adjust_root_pointer_closure());
   38.27    Management::oops_do(adjust_root_pointer_closure());
    39.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 15 11:09:34 2009 -0700
    39.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 15 21:53:47 2009 -0700
    39.3 @@ -799,8 +799,7 @@
    39.4      FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    39.5      virtual void do_oop(oop* p);
    39.6      virtual void do_oop(narrowOop* p);
    39.7 -    virtual const bool do_nmethods() const { return true; }
    39.8 -  };
    39.9 + };
   39.10  
   39.11    class FollowStackClosure: public VoidClosure {
   39.12     private:
   39.13 @@ -817,6 +816,8 @@
   39.14      AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
   39.15      virtual void do_oop(oop* p);
   39.16      virtual void do_oop(narrowOop* p);
   39.17 +    // do not walk from thread stacks to the code cache on this phase
   39.18 +    virtual void do_code_blob(CodeBlob* cb) const { }
   39.19    };
   39.20  
   39.21    // Closure for verifying update of pointers.  Does not
   39.22 @@ -1062,7 +1063,6 @@
   39.23      MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
   39.24      virtual void do_oop(oop* p);
   39.25      virtual void do_oop(narrowOop* p);
   39.26 -    virtual const bool do_nmethods() const { return true; }
   39.27    };
   39.28  
   39.29    PSParallelCompact();
    40.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Sep 15 11:09:34 2009 -0700
    40.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Sep 15 21:53:47 2009 -0700
    40.3 @@ -358,6 +358,7 @@
    40.4      PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    40.5      {
    40.6        // TraceTime("Roots");
    40.7 +      ParallelScavengeHeap::ParStrongRootsScope psrs;
    40.8  
    40.9        GCTaskQueue* q = GCTaskQueue::create();
   40.10  
   40.11 @@ -376,6 +377,7 @@
   40.12        q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
   40.13        q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
   40.14        q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
   40.15 +      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
   40.16  
   40.17        ParallelTaskTerminator terminator(
   40.18          gc_task_manager()->workers(),
    41.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Sep 15 11:09:34 2009 -0700
    41.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Sep 15 21:53:47 2009 -0700
    41.3 @@ -66,7 +66,7 @@
    41.4      case threads:
    41.5      {
    41.6        ResourceMark rm;
    41.7 -      Threads::oops_do(&roots_closure);
    41.8 +      Threads::oops_do(&roots_closure, NULL);
    41.9      }
   41.10      break;
   41.11  
   41.12 @@ -90,6 +90,14 @@
   41.13        JvmtiExport::oops_do(&roots_closure);
   41.14        break;
   41.15  
   41.16 +
   41.17 +    case code_cache:
   41.18 +      {
   41.19 +        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
   41.20 +        CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
   41.21 +      }
   41.22 +      break;
   41.23 +
   41.24      default:
   41.25        fatal("Unknown root type");
   41.26    }
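
The new code_cache case adapts the oop-level scavenge closure into a per-blob closure before handing it to CodeCache::scavenge_root_nmethods_do. A generic sketch of that adapter idiom, with hypothetical types standing in for CodeBlobToOopClosure:

    #include <cstdio>
    #include <vector>

    struct Oop { int gen; };                          // gen 0 == young, movable

    struct OopClosure {                               // oop-level visitor
      virtual void do_oop(Oop** p) = 0;
      virtual ~OopClosure() = default;
    };

    struct Blob {                                     // toy nmethod
      std::vector<Oop*> oops;
      void oops_do(OopClosure* cl) { for (Oop*& p : oops) cl->do_oop(&p); }
    };

    struct BlobToOopClosure {                         // forwards a blob's oops to `inner`
      OopClosure* inner;
      void do_blob(Blob* b) { b->oops_do(inner); }
    };

    struct PrintYoung : OopClosure {
      void do_oop(Oop** p) override {
        if (*p != nullptr && (*p)->gen == 0)
          std::printf("young oop slot at %p\n", (void*)p);
      }
    };

    int main() {
      Oop young{0}, tenured{1};
      Blob b{{&young, &tenured}};
      PrintYoung pr;
      BlobToOopClosure adapter{&pr};
      adapter.do_blob(&b);   // cf. CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob)
    }
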
   41.27 @@ -107,12 +115,13 @@
   41.28  
   41.29    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   41.30    PSScavengeRootsClosure roots_closure(pm);
   41.31 +  CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
   41.32  
   41.33    if (_java_thread != NULL)
   41.34 -    _java_thread->oops_do(&roots_closure);
   41.35 +    _java_thread->oops_do(&roots_closure, &roots_in_blobs);
   41.36  
   41.37    if (_vm_thread != NULL)
   41.38 -    _vm_thread->oops_do(&roots_closure);
   41.39 +    _vm_thread->oops_do(&roots_closure, &roots_in_blobs);
   41.40  
   41.41    // Do the real work
   41.42    pm->drain_stacks(false);
    42.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Tue Sep 15 11:09:34 2009 -0700
    42.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Tue Sep 15 21:53:47 2009 -0700
    42.3 @@ -54,7 +54,8 @@
    42.4      flat_profiler         = 5,
    42.5      system_dictionary     = 6,
    42.6      management            = 7,
    42.7 -    jvmti                 = 8
    42.8 +    jvmti                 = 8,
    42.9 +    code_cache            = 9
   42.10    };
   42.11   private:
   42.12    RootType _root_type;
    43.1 --- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
    43.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
    43.3 @@ -69,6 +69,7 @@
    43.4  }
    43.5  
    43.6  MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
    43.7 +CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
    43.8  
    43.9  void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
   43.10  void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
    44.1 --- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Sep 15 11:09:34 2009 -0700
    44.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Sep 15 21:53:47 2009 -0700
    44.3 @@ -57,14 +57,12 @@
    44.4     public:
    44.5      virtual void do_oop(oop* p);
    44.6      virtual void do_oop(narrowOop* p);
    44.7 -    virtual const bool do_nmethods() const { return true; }
    44.8    };
    44.9  
   44.10    class MarkAndPushClosure: public OopClosure {
   44.11     public:
   44.12      virtual void do_oop(oop* p);
   44.13      virtual void do_oop(narrowOop* p);
   44.14 -    virtual const bool do_nmethods() const { return true; }
   44.15    };
   44.16  
   44.17    class FollowStackClosure: public VoidClosure {
   44.18 @@ -163,6 +161,7 @@
   44.19   public:
   44.20    // Public closures
   44.21    static FollowRootClosure    follow_root_closure;
   44.22 +  static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
   44.23    static MarkAndPushClosure   mark_and_push_closure;
   44.24    static FollowStackClosure   follow_stack_closure;
   44.25    static AdjustPointerClosure adjust_root_pointer_closure;
    45.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
    45.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
    45.3 @@ -256,6 +256,14 @@
    45.4      return p == NULL || is_in_permanent(p);
    45.5    }
    45.6  
    45.7 +  // An object is scavengable if its location may move during a scavenge.
    45.8 +  // (A scavenge is a GC which is not a full GC.)
    45.9 +  // Currently, this just means it is not perm (and not null).
   45.10 +  // This could change if we rethink what's in perm-gen.
   45.11 +  bool is_scavengable(const void *p) const {
   45.12 +    return !is_in_permanent_or_null(p);
   45.13 +  }
   45.14 +
   45.15    // Returns "TRUE" if "p" is a method oop in the
   45.16    // current heap, with high probability. This predicate
   45.17    // is not stable, in general.
    46.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Tue Sep 15 11:09:34 2009 -0700
    46.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Tue Sep 15 21:53:47 2009 -0700
    46.3 @@ -555,12 +555,14 @@
    46.4           "save marks have not been newly set.");
    46.5  
    46.6    gch->gen_process_strong_roots(_level,
    46.7 -                                true, // Process younger gens, if any, as
    46.8 -                                      // strong roots.
    46.9 -                                false,// not collecting permanent generation.
   46.10 +                                true,  // Process younger gens, if any,
   46.11 +                                       // as strong roots.
   46.12 +                                true,  // activate StrongRootsScope
   46.13 +                                false, // not collecting perm generation.
   46.14                                  SharedHeap::SO_AllClasses,
   46.15 -                                &fsc_with_gc_barrier,
   46.16 -                                &fsc_with_no_gc_barrier);
   46.17 +                                &fsc_with_no_gc_barrier,
   46.18 +                                true,   // walk *all* scavengable nmethods
   46.19 +                                &fsc_with_gc_barrier);
   46.20  
   46.21    // "evacuate followers".
   46.22    evacuate_followers.do_void();
    47.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
    47.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
    47.3 @@ -677,13 +677,23 @@
    47.4  void GenCollectedHeap::
    47.5  gen_process_strong_roots(int level,
    47.6                           bool younger_gens_as_roots,
    47.7 +                         bool activate_scope,
    47.8                           bool collecting_perm_gen,
    47.9                           SharedHeap::ScanningOption so,
   47.10 -                         OopsInGenClosure* older_gens,
   47.11 -                         OopsInGenClosure* not_older_gens) {
   47.12 +                         OopsInGenClosure* not_older_gens,
   47.13 +                         bool do_code_roots,
   47.14 +                         OopsInGenClosure* older_gens) {
   47.15    // General strong roots.
   47.16 -  SharedHeap::process_strong_roots(collecting_perm_gen, so,
   47.17 -                                   not_older_gens, older_gens);
   47.18 +
   47.19 +  if (!do_code_roots) {
   47.20 +    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
   47.21 +                                     not_older_gens, NULL, older_gens);
   47.22 +  } else {
   47.23 +    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
   47.24 +    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
   47.25 +    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
   47.26 +                                     not_older_gens, &code_roots, older_gens);
   47.27 +  }
   47.28  
   47.29    if (younger_gens_as_roots) {
   47.30      if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
   47.31 @@ -706,8 +716,9 @@
   47.32  }
   47.33  
   47.34  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
   47.35 +                                              CodeBlobClosure* code_roots,
   47.36                                                OopClosure* non_root_closure) {
   47.37 -  SharedHeap::process_weak_roots(root_closure, non_root_closure);
   47.38 +  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
   47.39    // "Local" "weak" refs
   47.40    for (int i = 0; i < _n_gens; i++) {
   47.42      _gens[i]->ref_processor()->weak_oops_do(root_closure);
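
Note the marking decision in gen_process_strong_roots above: do_code_marking is true either when this call opens the StrongRootsScope itself (sequential collections) or when a parallel caller has already opened one, which nmethod::oops_do_marking_is_active() reports. A toy model of the two calling shapes (hypothetical stand-ins, not the HotSpot types):

    #include <cassert>

    static bool marking_active = false;        // cf. nmethod::oops_do_marking_is_active()

    struct StrongRootsScope {                  // cf. the MarkScope RAII pair later in this change
      StrongRootsScope()  { assert(!marking_active); marking_active = true;  }
      ~StrongRootsScope() { assert(marking_active);  marking_active = false; }
    };

    void gen_process_strong_roots(bool activate_scope) {
      bool do_code_marking = activate_scope || marking_active;
      assert(do_code_marking && "code roots are only marked under an open scope");
      if (activate_scope) {
        StrongRootsScope srs;                  // sequential caller: open the scope here
        // ... visit roots, marking nmethods ...
      } else {
        // parallel worker: the caller opened one scope around all workers
        // ... visit this worker's share of the roots ...
      }
    }

    int main() {
      gen_process_strong_roots(true);          // sequential collection
      {
        StrongRootsScope srs;                  // parallel collection opens the scope once...
        gen_process_strong_roots(false);       // ...then each worker runs without re-opening it
        gen_process_strong_roots(false);
      }
    }
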
    48.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
    48.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
    48.3 @@ -408,16 +408,22 @@
    48.4    // "SO_SystemClasses" to all the "system" classes and loaders;
    48.5    // "SO_Symbols_and_Strings" applies the closure to all entries in
    48.6    // SymbolsTable and StringTable.
    48.7 -  void gen_process_strong_roots(int level, bool younger_gens_as_roots,
    48.8 +  void gen_process_strong_roots(int level,
    48.9 +                                bool younger_gens_as_roots,
   48.10 +                                // The remaining arguments are in an order
   48.11 +                                // consistent with SharedHeap::process_strong_roots:
   48.12 +                                bool activate_scope,
   48.13                                  bool collecting_perm_gen,
   48.14                                  SharedHeap::ScanningOption so,
   48.15 -                                OopsInGenClosure* older_gens,
   48.16 -                                OopsInGenClosure* not_older_gens);
   48.17 +                                OopsInGenClosure* not_older_gens,
   48.18 +                                bool do_code_roots,
   48.19 +                                OopsInGenClosure* older_gens);
   48.20  
   48.21    // Apply "blk" to all the weak roots of the system.  These include
   48.22    // JNI weak roots, the code cache, system dictionary, symbol table,
   48.23    // string table, and referents of reachable weak refs.
   48.24    void gen_process_weak_roots(OopClosure* root_closure,
   48.25 +                              CodeBlobClosure* code_roots,
   48.26                                OopClosure* non_root_closure);
   48.27  
   48.28    // Set the saved marks of generations, if that makes sense.
    49.1 --- a/src/share/vm/memory/genMarkSweep.cpp	Tue Sep 15 11:09:34 2009 -0700
    49.2 +++ b/src/share/vm/memory/genMarkSweep.cpp	Tue Sep 15 21:53:47 2009 -0700
    49.3 @@ -240,9 +240,12 @@
    49.4  
    49.5    gch->gen_process_strong_roots(level,
    49.6                                  false, // Younger gens are not roots.
    49.7 +                                true,  // activate StrongRootsScope
    49.8                                  true,  // Collecting permanent generation.
    49.9                                  SharedHeap::SO_SystemClasses,
   49.10 -                                &follow_root_closure, &follow_root_closure);
   49.11 +                                &follow_root_closure,
   49.12 +                                true,   // walk code active on stacks
   49.13 +                                &follow_root_closure);
   49.14  
   49.15    // Process reference objects found during marking
   49.16    {
   49.17 @@ -330,14 +333,19 @@
   49.18  
   49.19    gch->gen_process_strong_roots(level,
   49.20                                  false, // Younger gens are not roots.
   49.21 +                                true,  // activate StrongRootsScope
   49.22                                  true,  // Collecting permanent generation.
   49.23                                  SharedHeap::SO_AllClasses,
   49.24                                  &adjust_root_pointer_closure,
   49.25 +                                false, // do not walk code
   49.26                                  &adjust_root_pointer_closure);
   49.27  
   49.28    // Now adjust pointers in remaining weak roots.  (All of which should
   49.29    // have been cleared if they pointed to non-surviving objects.)
   49.30 +  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
   49.31 +                                                   /*do_marking=*/ false);
   49.32    gch->gen_process_weak_roots(&adjust_root_pointer_closure,
   49.33 +                              &adjust_code_pointer_closure,
   49.34                                &adjust_pointer_closure);
   49.35  
   49.36    adjust_marks();
    50.1 --- a/src/share/vm/memory/iterator.cpp	Tue Sep 15 11:09:34 2009 -0700
    50.2 +++ b/src/share/vm/memory/iterator.cpp	Tue Sep 15 21:53:47 2009 -0700
    50.3 @@ -32,3 +32,42 @@
    50.4  void VoidClosure::do_void() {
    50.5    ShouldNotCallThis();
    50.6  }
    50.7 +
    50.8 +MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
    50.9 +  : _active(activate)
   50.10 +{
   50.11 +  if (_active)  nmethod::oops_do_marking_prologue();
   50.12 +}
   50.13 +
   50.14 +MarkingCodeBlobClosure::MarkScope::~MarkScope() {
   50.15 +  if (_active)  nmethod::oops_do_marking_epilogue();
   50.16 +}
   50.17 +
   50.18 +void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   50.19 +  if (!cb->is_nmethod())  return;
   50.20 +  nmethod* nm = (nmethod*) cb;
   50.21 +  if (!nm->test_set_oops_do_mark()) {
   50.22 +    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
   50.23 +    do_newly_marked_nmethod(nm);
   50.24 +  } else {
   50.25 +    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
   50.26 +  }
   50.27 +}
   50.28 +
   50.29 +void CodeBlobToOopClosure::do_newly_marked_nmethod(CodeBlob* cb) {
   50.30 +  cb->oops_do(_cl);
   50.31 +}
   50.32 +
   50.33 +void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   50.34 +  if (!_do_marking) {
   50.35 +    NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod())  ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
   50.36 +    // This assert won't work, since there are lots of mini-passes
   50.37 +    // (mostly in debug mode) that co-exist with marking phases.
   50.38 +    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
   50.39 +    cb->oops_do(_cl);
   50.40 +  } else {
   50.41 +    MarkingCodeBlobClosure::do_code_blob(cb);
   50.42 +  }
   50.43 +}
   50.44 +
   50.45 +
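
The marking variant above is a classic visit-at-most-once pass: test_set_oops_do_mark() both queries and sets a per-nmethod mark, and the epilogue clears all marks at the end of the pass. A self-contained sketch of the idiom follows, with invented names; in the real code the mark is a link into a global list rather than a bool.

    #include <cstdio>
    #include <vector>

    struct Nmethod {
      bool marked;
      Nmethod() : marked(false) {}
      bool test_set_mark() {          // models nmethod::test_set_oops_do_mark()
        bool was_marked = marked;
        marked = true;
        return was_marked;
      }
    };

    struct MarkingWalker {
      std::vector<Nmethod*> visited;  // models the global oops-do mark list
      void do_blob(Nmethod* nm) {
        if (!nm->test_set_mark()) {   // first visit in this pass only
          visited.push_back(nm);
          std::printf("1st visit: %p\n", (void*)nm);
        }                             // later visits are silently skipped
      }
      ~MarkingWalker() {              // models oops_do_marking_epilogue()
        for (size_t i = 0; i < visited.size(); i++) visited[i]->marked = false;
      }
    };

    int main() {
      Nmethod a, b;
      {
        MarkingWalker w;
        w.do_blob(&a);
        w.do_blob(&a);                // skipped: already marked this pass
        w.do_blob(&b);
      }                               // marks cleared at scope exit (MarkScope)
      std::printf("a.marked after pass: %d\n", (int)a.marked);
      return 0;
    }
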
    51.1 --- a/src/share/vm/memory/iterator.hpp	Tue Sep 15 11:09:34 2009 -0700
    51.2 +++ b/src/share/vm/memory/iterator.hpp	Tue Sep 15 21:53:47 2009 -0700
    51.3 @@ -24,6 +24,7 @@
    51.4  
    51.5  // The following classes are C++ `closures` for iterating over objects, roots and spaces
    51.6  
    51.7 +class CodeBlob;
    51.8  class ReferenceProcessor;
    51.9  
   51.10  // Closure provides abortability.
   51.11 @@ -57,9 +58,6 @@
   51.12    virtual const bool should_remember_klasses() const { return false;    }
   51.13    virtual void remember_klass(Klass* k) { /* do nothing */ }
   51.14  
   51.15 -  // If "true", invoke on nmethods (when scanning compiled frames).
   51.16 -  virtual const bool do_nmethods() const { return false; }
   51.17 -
   51.18    // The methods below control how object iterations invoking this closure
   51.19    // should be performed:
   51.20  
   51.21 @@ -158,6 +156,51 @@
   51.22  };
   51.23  
   51.24  
   51.25 +// CodeBlobClosure is used for iterating through code blobs
   51.26 +// in the code cache or on thread stacks
   51.27 +
   51.28 +class CodeBlobClosure : public Closure {
   51.29 + public:
   51.30 +  // Called for each code blob.
   51.31 +  virtual void do_code_blob(CodeBlob* cb) = 0;
   51.32 +};
   51.33 +
   51.34 +
   51.35 +class MarkingCodeBlobClosure : public CodeBlobClosure {
   51.36 + public:
   51.37 +  // Called at most once per unique nmethod; non-nmethod blobs are skipped.
   51.38 +  virtual void do_newly_marked_nmethod(CodeBlob* cb) = 0;
   51.39 +
   51.40 +  virtual void do_code_blob(CodeBlob* cb);
   51.41 +    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
   51.42 +
   51.43 +  class MarkScope : public StackObj {
   51.44 +  protected:
   51.45 +    bool _active;
   51.46 +  public:
   51.47 +    MarkScope(bool activate = true);
   51.48 +      // = { if (active) nmethod::oops_do_marking_prologue(); }
   51.49 +    ~MarkScope();
   51.50 +      // = { if (active) nmethod::oops_do_marking_epilogue(); }
   51.51 +  };
   51.52 +};
   51.53 +
   51.54 +
   51.55 +// Applies an oop closure to all oop fields of each code blob
   51.56 +// handed to it during an iteration over code blobs.
   51.57 +class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
   51.58 +  OopClosure* _cl;
   51.59 +  bool _do_marking;
   51.60 +public:
   51.61 +  virtual void do_newly_marked_nmethod(CodeBlob* cb);
   51.62 +    // = { cb->oops_do(_cl); }
   51.63 +  virtual void do_code_blob(CodeBlob* cb);
   51.64 +    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
   51.65 +  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
   51.66 +    : _cl(cl), _do_marking(do_marking) {}
   51.67 +};
   51.68 +
   51.69 +
   51.70  
   51.71  // MonitorClosure is used for iterating over monitors in the monitors cache
   51.72  
    52.1 --- a/src/share/vm/memory/sharedHeap.cpp	Tue Sep 15 11:09:34 2009 -0700
    52.2 +++ b/src/share/vm/memory/sharedHeap.cpp	Tue Sep 15 21:53:47 2009 -0700
    52.3 @@ -100,12 +100,27 @@
    52.4           "Not in range.");
    52.5  }
    52.6  
    52.7 -void SharedHeap::process_strong_roots(bool collecting_perm_gen,
    52.8 +SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
    52.9 +  : MarkScope(activate)
   52.10 +{
   52.11 +  if (_active) {
   52.12 +    outer->change_strong_roots_parity();
   52.13 +  }
   52.14 +}
   52.15 +
   52.16 +SharedHeap::StrongRootsScope::~StrongRootsScope() {
   52.17 +  // nothing particular
   52.18 +}
   52.19 +
   52.20 +void SharedHeap::process_strong_roots(bool activate_scope,
   52.21 +                                      bool collecting_perm_gen,
   52.22                                        ScanningOption so,
   52.23                                        OopClosure* roots,
   52.24 +                                      CodeBlobClosure* code_roots,
   52.25                                        OopsInGenClosure* perm_blk) {
   52.26 +  StrongRootsScope srs(this, activate_scope);
   52.27    // General strong roots.
   52.28 -  if (n_par_threads() == 0) change_strong_roots_parity();
   52.29 +  assert(_strong_roots_parity != 0, "must have called prologue code");
   52.30    if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
   52.31      Universe::oops_do(roots);
   52.32      ReferenceProcessor::oops_do(roots);
   52.33 @@ -117,9 +132,9 @@
   52.34      JNIHandles::oops_do(roots);
   52.35    // All threads execute this; the individual threads are task groups.
   52.36    if (ParallelGCThreads > 0) {
   52.37 -    Threads::possibly_parallel_oops_do(roots);
   52.38 +    Threads::possibly_parallel_oops_do(roots, code_roots);
   52.39    } else {
   52.40 -    Threads::oops_do(roots);
   52.41 +    Threads::oops_do(roots, code_roots);
   52.42    }
   52.43    if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
   52.44      ObjectSynchronizer::oops_do(roots);
   52.45 @@ -156,11 +171,29 @@
   52.46    }
   52.47  
   52.48    if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
   52.49 -     if (so & SO_CodeCache) {
   52.50 -       CodeCache::oops_do(roots);
   52.51 -     }
   52.52 +    if (so & SO_CodeCache) {
   52.53 +      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
   52.54 +      assert(collecting_perm_gen, "scanning all of code cache");
   52.55 +      assert(code_roots != NULL, "must supply closure for code cache");
   52.56 +      if (code_roots != NULL) {
   52.57 +        CodeCache::blobs_do(code_roots);
   52.58 +      }
   52.59 +    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
   52.60 +      if (!collecting_perm_gen) {
   52.61 +        // If we are collecting from class statics, but we are not going to
   52.62 +        // visit all of the CodeCache, collect from the non-perm roots if any.
   52.63 +        // This makes the code cache function temporarily as a source of strong
   52.64 +        // roots for oops, until the next major collection.
   52.65 +        //
   52.66 +        // If collecting_perm_gen is true, we require that this phase will call
   52.67 +        // CodeCache::do_unloading.  This will kill off nmethods with expired
   52.68 +        // weak references, such as stale invokedynamic targets.
   52.69 +        CodeCache::scavenge_root_nmethods_do(code_roots);
   52.70 +      }
   52.71 +    }
   52.72      // Verify if the code cache contents are in the perm gen
   52.73 -    NOT_PRODUCT(CodeCache::oops_do(&assert_is_perm_closure));
   52.74 +    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
   52.75 +    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
   52.76    }
   52.77  
   52.78    // Roots that should point only into permanent generation.
   52.79 @@ -220,11 +253,12 @@
   52.80  // just skip adjusting any shared entries in the string table.
   52.81  
   52.82  void SharedHeap::process_weak_roots(OopClosure* root_closure,
   52.83 +                                    CodeBlobClosure* code_roots,
   52.84                                      OopClosure* non_root_closure) {
   52.85    // Global (weak) JNI handles
   52.86    JNIHandles::weak_oops_do(&always_true, root_closure);
   52.87  
   52.88 -  CodeCache::oops_do(non_root_closure);
   52.89 +  CodeCache::blobs_do(code_roots);
   52.90    SymbolTable::oops_do(root_closure);
   52.91    if (UseSharedSpaces && !DumpSharedSpaces) {
   52.92      SkipAdjustingSharedStrings skip_closure(root_closure);
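
StrongRootsScope turns the old ad-hoc "call change_strong_roots_parity once" convention into an RAII guard, which is what lets the assert in process_strong_roots insist that the prologue ran. A standalone sketch of the pattern, with invented names; the real parity cycles between two non-zero values because zero is reserved to mean "never claimed".

    #include <cassert>
    #include <cstdio>

    static int g_strong_roots_parity = 1;

    static void change_strong_roots_parity() {
      // Cycle 1 <-> 2; zero is reserved to mean "never claimed".
      g_strong_roots_parity = (g_strong_roots_parity == 2 ? 1 : 2);
    }

    struct StrongRootsScopeSketch {     // models SharedHeap::StrongRootsScope
      bool _active;
      explicit StrongRootsScopeSketch(bool activate) : _active(activate) {
        if (_active) change_strong_roots_parity();
      }
    };

    static void process_strong_roots_sketch(bool activate_scope) {
      StrongRootsScopeSketch srs(activate_scope);
      assert(g_strong_roots_parity != 0 && "must have called prologue code");
      std::printf("scanning roots at parity %d\n", g_strong_roots_parity);
    }

    int main() {
      process_strong_roots_sketch(true);   // activates the scope itself
      process_strong_roots_sketch(false);  // assumes an enclosing active scope
      return 0;
    }
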
    53.1 --- a/src/share/vm/memory/sharedHeap.hpp	Tue Sep 15 11:09:34 2009 -0700
    53.2 +++ b/src/share/vm/memory/sharedHeap.hpp	Tue Sep 15 21:53:47 2009 -0700
    53.3 @@ -165,9 +165,21 @@
    53.4    //   c) to never return a distinguished value (zero) with which such
    53.5    //      task-claiming variables may be initialized, to indicate "never
    53.6    //      claimed".
    53.7 + private:
    53.8    void change_strong_roots_parity();
    53.9 + public:
   53.10    int strong_roots_parity() { return _strong_roots_parity; }
   53.11  
   53.12 +  // Call these in sequential code around process_strong_roots.
   53.13 +  // The StrongRootsScope constructor calls change_strong_roots_parity
   53.14 +  // when activated, so parallel tasks can be claimed afresh.
   53.15 +  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   53.16 +  public:
   53.17 +    StrongRootsScope(SharedHeap* outer, bool activate = true);
   53.18 +    ~StrongRootsScope();
   53.19 +  };
   53.20 +  friend class StrongRootsScope;
   53.21 +
   53.22    enum ScanningOption {
   53.23      SO_None                = 0x0,
   53.24      SO_AllClasses          = 0x1,
   53.25 @@ -198,15 +210,18 @@
   53.26    // "SO_Symbols" applies the closure to all entries in SymbolsTable;
   53.27    // "SO_Strings" applies the closure to all entries in StringTable;
   53.28    // "SO_CodeCache" applies the closure to all elements of the CodeCache.
   53.29 -  void process_strong_roots(bool collecting_perm_gen,
   53.30 +  void process_strong_roots(bool activate_scope,
   53.31 +                            bool collecting_perm_gen,
   53.32                              ScanningOption so,
   53.33                              OopClosure* roots,
   53.34 +                            CodeBlobClosure* code_roots,
   53.35                              OopsInGenClosure* perm_blk);
   53.36  
   53.37    // Apply "blk" to all the weak roots of the system.  These include
   53.38    // JNI weak roots, the code cache, system dictionary, symbol table,
   53.39    // string table.
   53.40    void process_weak_roots(OopClosure* root_closure,
   53.41 +                          CodeBlobClosure* code_roots,
   53.42                            OopClosure* non_root_closure);
   53.43  
   53.44  
    54.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Sep 15 11:09:34 2009 -0700
    54.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Sep 15 21:53:47 2009 -0700
    54.3 @@ -2025,7 +2025,7 @@
    54.4    // This is a short non-blocking critical region, so the no safepoint check is ok.
    54.5    OsrList_lock->lock_without_safepoint_check();
    54.6    assert(n->is_osr_method(), "wrong kind of nmethod");
    54.7 -  n->set_link(osr_nmethods_head());
    54.8 +  n->set_osr_link(osr_nmethods_head());
    54.9    set_osr_nmethods_head(n);
   54.10    // Remember to unlock again
   54.11    OsrList_lock->unlock();
   54.12 @@ -2041,17 +2041,17 @@
   54.13    // Search for match
   54.14    while(cur != NULL && cur != n) {
   54.15      last = cur;
   54.16 -    cur = cur->link();
   54.17 +    cur = cur->osr_link();
   54.18    }
   54.19    if (cur == n) {
   54.20      if (last == NULL) {
   54.21        // Remove first element
   54.22 -      set_osr_nmethods_head(osr_nmethods_head()->link());
   54.23 +      set_osr_nmethods_head(osr_nmethods_head()->osr_link());
   54.24      } else {
   54.25 -      last->set_link(cur->link());
   54.26 +      last->set_osr_link(cur->osr_link());
   54.27      }
   54.28    }
   54.29 -  n->set_link(NULL);
   54.30 +  n->set_osr_link(NULL);
   54.31    // Remember to unlock again
   54.32    OsrList_lock->unlock();
   54.33  }
   54.34 @@ -2068,7 +2068,7 @@
   54.35        OsrList_lock->unlock();
   54.36        return osr;
   54.37      }
   54.38 -    osr = osr->link();
   54.39 +    osr = osr->osr_link();
   54.40    }
   54.41    OsrList_lock->unlock();
   54.42    return NULL;
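
The rename from link() to osr_link() matters because nmethods now sit on two independent intrusive lists: the per-klass OSR list and the new global scavenge-root list. A minimal model of the removal loop above, with invented names:

    #include <cassert>
    #include <cstddef>

    struct Nm {
      Nm* osr_link;             // per-instanceKlass OSR list
      Nm* scavenge_root_link;   // global scavenge-root list (independent)
      Nm() : osr_link(NULL), scavenge_root_link(NULL) {}
    };

    struct OsrList {
      Nm* head;
      OsrList() : head(NULL) {}
      void add(Nm* n) { n->osr_link = head; head = n; }
      void remove(Nm* n) {      // mirrors instanceKlass::remove_osr_nmethod
        Nm* last = NULL;
        Nm* cur = head;
        while (cur != NULL && cur != n) { last = cur; cur = cur->osr_link; }
        if (cur == n) {
          if (last == NULL) head = head->osr_link;   // remove first element
          else              last->osr_link = cur->osr_link;
        }
        n->osr_link = NULL;
      }
    };

    int main() {
      Nm a, b;
      OsrList list;
      list.add(&a);
      list.add(&b);             // list: b -> a
      list.remove(&a);
      assert(list.head == &b && b.osr_link == NULL);
      return 0;
    }
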
    55.1 --- a/src/share/vm/oops/oop.hpp	Tue Sep 15 11:09:34 2009 -0700
    55.2 +++ b/src/share/vm/oops/oop.hpp	Tue Sep 15 21:53:47 2009 -0700
    55.3 @@ -330,6 +330,7 @@
    55.4  
    55.5    bool is_perm() const;
    55.6    bool is_perm_or_null() const;
    55.7 +  bool is_scavengable() const;
    55.8    bool is_shared() const;
    55.9    bool is_shared_readonly() const;
   55.10    bool is_shared_readwrite() const;
    56.1 --- a/src/share/vm/oops/oop.inline2.hpp	Tue Sep 15 11:09:34 2009 -0700
    56.2 +++ b/src/share/vm/oops/oop.inline2.hpp	Tue Sep 15 21:53:47 2009 -0700
    56.3 @@ -34,3 +34,7 @@
    56.4  inline bool oopDesc::is_perm_or_null() const {
    56.5    return this == NULL || is_perm();
    56.6  }
    56.7 +
    56.8 +inline bool oopDesc::is_scavengable() const {
    56.9 +  return Universe::heap()->is_scavengable(this);
   56.10 +}
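
is_scavengable() simply delegates an address-range question to the heap: an oop is scavengable when a minor collection could move it, which in this changeset means "not in the perm gen". A toy model of the delegation, with invented names and addresses:

    #include <cstdint>
    #include <cstdio>

    struct HeapSketch {                  // stands in for Universe::heap()
      uintptr_t scavengable_lo, scavengable_hi;
      bool is_scavengable(const void* p) const {
        uintptr_t a = (uintptr_t)p;
        return a >= scavengable_lo && a < scavengable_hi;
      }
      static HeapSketch* heap() {
        static HeapSketch h = { 0x1000, 0x2000 };  // perm gen lies outside
        return &h;
      }
    };

    int main() {
      void* young_oop = (void*)0x1800;   // hypothetical young-gen address
      void* perm_oop  = (void*)0x3000;   // hypothetical perm-gen address
      std::printf("young: %d, perm: %d\n",
                  (int)HeapSketch::heap()->is_scavengable(young_oop),
                  (int)HeapSketch::heap()->is_scavengable(perm_oop));
      return 0;
    }
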
    57.1 --- a/src/share/vm/opto/output.cpp	Tue Sep 15 11:09:34 2009 -0700
    57.2 +++ b/src/share/vm/opto/output.cpp	Tue Sep 15 21:53:47 2009 -0700
    57.3 @@ -611,7 +611,7 @@
    57.4        assert(cik->is_instance_klass() ||
    57.5               cik->is_array_klass(), "Not supported allocation.");
    57.6        sv = new ObjectValue(spobj->_idx,
    57.7 -                           new ConstantOopWriteValue(cik->encoding()));
    57.8 +                           new ConstantOopWriteValue(cik->constant_encoding()));
    57.9        Compile::set_sv_for_object_node(objs, sv);
   57.10  
   57.11        uint first_ind = spobj->first_index();
   57.12 @@ -702,13 +702,13 @@
   57.13    case Type::AryPtr:
   57.14    case Type::InstPtr:
   57.15    case Type::KlassPtr:          // fall through
   57.16 -    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
   57.17 +    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
   57.18      break;
   57.19    case Type::NarrowOop:
   57.20      if (t == TypeNarrowOop::NULL_PTR) {
   57.21        array->append(new ConstantOopWriteValue(NULL));
   57.22      } else {
   57.23 -      array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->encoding()));
   57.24 +      array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
   57.25      }
   57.26      break;
   57.27    case Type::Int:
   57.28 @@ -871,7 +871,7 @@
   57.29            assert(cik->is_instance_klass() ||
   57.30                   cik->is_array_klass(), "Not supported allocation.");
   57.31            ObjectValue* sv = new ObjectValue(spobj->_idx,
   57.32 -                                new ConstantOopWriteValue(cik->encoding()));
   57.33 +                                new ConstantOopWriteValue(cik->constant_encoding()));
   57.34            Compile::set_sv_for_object_node(objs, sv);
   57.35  
   57.36            uint first_ind = spobj->first_index();
   57.37 @@ -890,7 +890,7 @@
   57.38          }
   57.39        } else {
   57.40          const TypePtr *tp = obj_node->bottom_type()->make_ptr();
   57.41 -        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->encoding());
   57.42 +        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
   57.43        }
   57.44  
   57.45        OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
    58.1 --- a/src/share/vm/opto/parse.hpp	Tue Sep 15 11:09:34 2009 -0700
    58.2 +++ b/src/share/vm/opto/parse.hpp	Tue Sep 15 21:53:47 2009 -0700
    58.3 @@ -469,7 +469,7 @@
    58.4  
    58.5    // loading from a constant field or the constant pool
    58.6    // returns false if push failed (non-perm field constants only, not ldcs)
    58.7 -  bool push_constant(ciConstant con);
    58.8 +  bool push_constant(ciConstant con, bool require_constant = false);
    58.9  
   58.10    // implementation of object creation bytecodes
   58.11    void do_new();
    59.1 --- a/src/share/vm/opto/parse2.cpp	Tue Sep 15 11:09:34 2009 -0700
    59.2 +++ b/src/share/vm/opto/parse2.cpp	Tue Sep 15 21:53:47 2009 -0700
    59.3 @@ -1325,7 +1325,8 @@
    59.4            }
    59.5          }
    59.6        }
    59.7 -      push_constant(constant);
    59.8 +      bool pushed = push_constant(constant, true);
    59.9 +      guarantee(pushed, "must be possible to push this constant");
   59.10      }
   59.11  
   59.12      break;
    60.1 --- a/src/share/vm/opto/parse3.cpp	Tue Sep 15 11:09:34 2009 -0700
    60.2 +++ b/src/share/vm/opto/parse3.cpp	Tue Sep 15 21:53:47 2009 -0700
    60.3 @@ -267,7 +267,7 @@
    60.4  }
    60.5  
    60.6  
    60.7 -bool Parse::push_constant(ciConstant constant) {
    60.8 +bool Parse::push_constant(ciConstant constant, bool require_constant) {
    60.9    switch (constant.basic_type()) {
   60.10    case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
   60.11    case T_INT:      push( intcon(constant.as_int())     ); break;
   60.12 @@ -279,13 +279,16 @@
   60.13    case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
   60.14    case T_ARRAY:
   60.15    case T_OBJECT: {
   60.16 -    // the oop is in perm space if the ciObject "has_encoding"
   60.17 +    // cases:
   60.18 +    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
   60.19 +    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
   60.20 +    // An oop is not scavengable if it is in the perm gen.
   60.21      ciObject* oop_constant = constant.as_object();
   60.22      if (oop_constant->is_null_object()) {
   60.23        push( zerocon(T_OBJECT) );
   60.24        break;
   60.25 -    } else if (oop_constant->has_encoding()) {
   60.26 -      push( makecon(TypeOopPtr::make_from_constant(oop_constant)) );
   60.27 +    } else if (require_constant || oop_constant->should_be_constant()) {
   60.28 +      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
   60.29        break;
   60.30      } else {
   60.31        // we cannot inline the oop, but we can use it later to narrow a type
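
The can_be_constant / should_be_constant distinction described in the comment above is just a two-threshold policy on the ScavengeRootsInCode flag. A standalone model of that policy (not the real ciObject API):

    #include <cstdio>

    static int ScavengeRootsInCode = 0;   // 0, 1, or 2, as in globals.hpp

    // can_be_constant: embedding the oop is possible at all (flag >= 1 for
    // scavengable oops); should_be_constant: the compiler should prefer
    // embedding (flag >= 2). Non-scavengable (perm) oops always qualify.
    static bool can_be_constant(bool scavengable) {
      return !scavengable || ScavengeRootsInCode != 0;
    }
    static bool should_be_constant(bool scavengable) {
      return !scavengable || ScavengeRootsInCode >= 2;
    }

    int main() {
      for (int flag = 0; flag <= 2; flag++) {
        ScavengeRootsInCode = flag;
        std::printf("flag=%d scavengable oop: can=%d should=%d\n", flag,
                    (int)can_be_constant(true), (int)should_be_constant(true));
      }
      return 0;
    }
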
    61.1 --- a/src/share/vm/opto/type.cpp	Tue Sep 15 11:09:34 2009 -0700
    61.2 +++ b/src/share/vm/opto/type.cpp	Tue Sep 15 21:53:47 2009 -0700
    61.3 @@ -2411,14 +2411,13 @@
    61.4  
    61.5  //------------------------------make_from_constant-----------------------------
    61.6  // Make a java pointer from an oop constant
    61.7 -const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
    61.8 +const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
    61.9    if (o->is_method_data() || o->is_method()) {
   61.10      // Treat much like a typeArray of bytes, like below, but fake the type...
   61.11 -    assert(o->has_encoding(), "must be a perm space object");
   61.12      const Type* etype = (Type*)get_const_basic_type(T_BYTE);
   61.13      const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
   61.14      ciKlass *klass = ciTypeArrayKlass::make((BasicType) T_BYTE);
   61.15 -    assert(o->has_encoding(), "method data oops should be tenured");
   61.16 +    assert(o->can_be_constant(), "method data oops should be tenured");
   61.17      const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
   61.18      return arr;
   61.19    } else {
   61.20 @@ -2427,8 +2426,9 @@
   61.21      ciKlass *klass = o->klass();
   61.22      if (klass->is_instance_klass()) {
   61.23        // Element is an instance
   61.24 -      if (!o->has_encoding()) {  // not a perm-space constant
   61.25 -        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
   61.26 +      if (require_constant) {
   61.27 +        if (!o->can_be_constant())  return NULL;
   61.28 +      } else if (!o->should_be_constant()) {
   61.29          return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
   61.30        }
   61.31        return TypeInstPtr::make(o);
   61.32 @@ -2440,8 +2440,9 @@
   61.33        // We used to pass NotNull in here, asserting that the sub-arrays
   61.34        // are all not-null.  This is not true in general, as code can
   61.35        // slam NULLs down in the subarrays.
   61.36 -      if (!o->has_encoding()) {  // not a perm-space constant
   61.37 -        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
   61.38 +      if (require_constant) {
   61.39 +        if (!o->can_be_constant())  return NULL;
   61.40 +      } else if (!o->should_be_constant()) {
   61.41          return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
   61.42        }
   61.43        const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
   61.44 @@ -2453,8 +2454,9 @@
   61.45        const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
   61.46        // We used to pass NotNull in here, asserting that the array pointer
   61.47        // is not-null. That was not true in general.
   61.48 -      if (!o->has_encoding()) {  // not a perm-space constant
   61.49 -        // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
   61.50 +      if (require_constant) {
   61.51 +        if (!o->can_be_constant())  return NULL;
   61.52 +      } else if (!o->should_be_constant()) {
   61.53          return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
   61.54        }
   61.55        const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
   61.56 @@ -2483,7 +2485,7 @@
   61.57      ShouldNotReachHere();
   61.58    }
   61.59  
   61.60 -  return (intptr_t)const_oop()->encoding();
   61.61 +  return (intptr_t)const_oop()->constant_encoding();
   61.62  }
   61.63  
   61.64  
   61.65 @@ -3338,14 +3340,19 @@
   61.66        ciObject* o = const_oop();
   61.67        if( _ptr == Constant ) {
   61.68          if( tap->const_oop() != NULL && !o->equals(tap->const_oop()) ) {
   61.69 +          xk = (klass() == tap->klass());
   61.70            ptr = NotNull;
   61.71            o = NULL;
   61.72            instance_id = InstanceBot;
   61.73 +        } else {
   61.74 +          xk = true;
   61.75          }
   61.76        } else if( above_centerline(_ptr) ) {
   61.77          o = tap->const_oop();
   61.78 +        xk = true;
   61.79 +      } else {
   61.80 +        xk = this->_klass_is_exact;
   61.81        }
   61.82 -      xk = true;
   61.83        return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off, instance_id );
   61.84      }
   61.85      case NotNull:
    62.1 --- a/src/share/vm/opto/type.hpp	Tue Sep 15 11:09:34 2009 -0700
    62.2 +++ b/src/share/vm/opto/type.hpp	Tue Sep 15 21:53:47 2009 -0700
    62.3 @@ -711,7 +711,10 @@
    62.4      return make_from_klass_common(klass, false, false);
    62.5    }
    62.6    // Creates a singleton type given an object.
    62.7 -  static const TypeOopPtr* make_from_constant(ciObject* o);
    62.8 +  // If the object cannot be rendered as a constant,
    62.9 +  // may return a non-singleton type.
   62.10 +  // If require_constant, produce a NULL if a singleton is not possible.
   62.11 +  static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false);
   62.12  
   62.13    // Make a generic (unclassed) pointer to an oop.
   62.14    static const TypeOopPtr* make(PTR ptr, int offset, int instance_id = InstanceBot);
    63.1 --- a/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 15 11:09:34 2009 -0700
    63.2 +++ b/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 15 21:53:47 2009 -0700
    63.3 @@ -3126,6 +3126,12 @@
    63.4    // exceptions) will be visible.
    63.5    blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
    63.6    Universe::oops_do(&blk);
    63.7 +
    63.8 +  // If there are any non-perm roots in the code cache, visit them.
    63.9 +  blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
   63.10 +  CodeBlobToOopClosure look_in_blobs(&blk, false);
   63.11 +  CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
   63.12 +
   63.13    return true;
   63.14  }
   63.15  
    64.1 --- a/src/share/vm/runtime/arguments.cpp	Tue Sep 15 11:09:34 2009 -0700
    64.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Sep 15 21:53:47 2009 -0700
    64.3 @@ -2639,16 +2639,22 @@
    64.4  
    64.5    if (EnableInvokeDynamic && !EnableMethodHandles) {
    64.6      if (!FLAG_IS_DEFAULT(EnableMethodHandles)) {
    64.7 -      warning("forcing EnableMethodHandles true to allow EnableInvokeDynamic");
    64.8 +      warning("forcing EnableMethodHandles true because EnableInvokeDynamic is true");
    64.9      }
   64.10      EnableMethodHandles = true;
   64.11    }
   64.12    if (EnableMethodHandles && !AnonymousClasses) {
   64.13      if (!FLAG_IS_DEFAULT(AnonymousClasses)) {
   64.14 -      warning("forcing AnonymousClasses true to enable EnableMethodHandles");
   64.15 +      warning("forcing AnonymousClasses true because EnableMethodHandles is true");
   64.16      }
   64.17      AnonymousClasses = true;
   64.18    }
   64.19 +  if ((EnableMethodHandles || AnonymousClasses) && ScavengeRootsInCode == 0) {
   64.20 +    if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) {
   64.21 +      warning("forcing ScavengeRootsInCode non-zero because EnableMethodHandles or AnonymousClasses is true");
   64.22 +    }
   64.23 +    ScavengeRootsInCode = 1;
   64.24 +  }
   64.25  
   64.26    if (PrintGCDetails) {
   64.27      // Turn on -verbose:gc options as well
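
The arguments.cpp hunk follows HotSpot's usual flag-forcing pattern: promote a dependent flag, but warn only when the user set it explicitly. A standalone model of that pattern (Flag and the is_default bit are simplified stand-ins for the real flag machinery and FLAG_IS_DEFAULT):

    #include <cstdio>

    struct Flag {
      long value;
      bool is_default;                  // models FLAG_IS_DEFAULT(...)
      explicit Flag(long v) : value(v), is_default(true) {}
      void set(long v) { value = v; is_default = false; }
    };

    static Flag EnableMethodHandles(0), ScavengeRootsInCode(0);

    static void force_dependent_flags() {
      if (EnableMethodHandles.value != 0 && ScavengeRootsInCode.value == 0) {
        if (!ScavengeRootsInCode.is_default)
          std::printf("warning: forcing ScavengeRootsInCode non-zero "
                      "because EnableMethodHandles is true\n");
        ScavengeRootsInCode.value = 1;  // promoted either way
      }
    }

    int main() {
      EnableMethodHandles.set(1);       // as if -XX:+EnableMethodHandles
      force_dependent_flags();
      std::printf("ScavengeRootsInCode=%ld\n", ScavengeRootsInCode.value);
      return 0;
    }
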
    65.1 --- a/src/share/vm/runtime/frame.cpp	Tue Sep 15 11:09:34 2009 -0700
    65.2 +++ b/src/share/vm/runtime/frame.cpp	Tue Sep 15 21:53:47 2009 -0700
    65.3 @@ -1043,7 +1043,7 @@
    65.4    finder.oops_do();
    65.5  }
    65.6  
    65.7 -void frame::oops_code_blob_do(OopClosure* f, const RegisterMap* reg_map) {
    65.8 +void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
    65.9    assert(_cb != NULL, "sanity check");
   65.10    if (_cb->oop_maps() != NULL) {
   65.11      OopMapSet::oops_do(this, reg_map, f);
   65.12 @@ -1058,21 +1058,9 @@
   65.13    // oops referenced from nmethods active on thread stacks so as to
   65.14    // prevent them from being collected. However, this visit should be
   65.15    // restricted to certain phases of the collection only. The
   65.16 -  // closure answers whether it wants nmethods to be traced.
   65.17 -  // (All CodeBlob subtypes other than NMethod currently have
   65.18 -  // an empty oops_do() method.
   65.19 -  if (f->do_nmethods()) {
   65.20 -    _cb->oops_do(f);
   65.21 -  }
   65.22 -}
   65.23 -
   65.24 -void frame::nmethods_code_blob_do() {
   65.25 -  assert(_cb != NULL, "sanity check");
   65.26 -
   65.27 -  // If we see an activation belonging to a non_entrant nmethod, we mark it.
   65.28 -  if (_cb->is_nmethod() && ((nmethod *)_cb)->is_not_entrant()) {
   65.29 -    ((nmethod*)_cb)->mark_as_seen_on_stack();
   65.30 -  }
   65.31 +  // closure decides how it wants nmethods to be traced.
   65.32 +  if (cf != NULL)
   65.33 +    cf->do_code_blob(_cb);
   65.34  }
   65.35  
   65.36  class CompiledArgumentOopFinder: public SignatureInfo {
   65.37 @@ -1201,18 +1189,18 @@
   65.38  }
   65.39  
   65.40  
   65.41 -void frame::oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache) {
   65.42 +void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
   65.43           if (is_interpreted_frame())    { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
   65.44    } else if (is_entry_frame())          { oops_entry_do      (f, map);
   65.45 -  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, map);
   65.46 +  } else if (CodeCache::contains(pc())) { oops_code_blob_do  (f, cf, map);
   65.47    } else {
   65.48      ShouldNotReachHere();
   65.49    }
   65.50  }
   65.51  
   65.52 -void frame::nmethods_do() {
   65.53 +void frame::nmethods_do(CodeBlobClosure* cf) {
   65.54    if (_cb != NULL && _cb->is_nmethod()) {
   65.55 -    nmethods_code_blob_do();
   65.56 +    cf->do_code_blob(_cb);
   65.57    }
   65.58  }
   65.59  
   65.60 @@ -1358,7 +1346,7 @@
   65.61      }
   65.62    }
   65.63    COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
   65.64 -  oops_do_internal(&VerifyOopClosure::verify_oop, (RegisterMap*)map, false);
   65.65 +  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
   65.66  }
   65.67  
   65.68  
    66.1 --- a/src/share/vm/runtime/frame.hpp	Tue Sep 15 11:09:34 2009 -0700
    66.2 +++ b/src/share/vm/runtime/frame.hpp	Tue Sep 15 21:53:47 2009 -0700
    66.3 @@ -384,16 +384,14 @@
    66.4    void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
    66.5  
    66.6    // Iteration of oops
    66.7 -  void oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache);
    66.8 +  void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
    66.9    void oops_entry_do(OopClosure* f, const RegisterMap* map);
   66.10 -  void oops_code_blob_do(OopClosure* f, const RegisterMap* map);
   66.11 +  void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
   66.12    int adjust_offset(methodOop method, int index); // helper for above fn
   66.13 -  // Iteration of nmethods
   66.14 -  void nmethods_code_blob_do();
   66.15   public:
   66.16    // Memory management
   66.17 -  void oops_do(OopClosure* f, RegisterMap* map) { oops_do_internal(f, map, true); }
   66.18 -  void nmethods_do();
   66.19 +  void oops_do(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cf, map, true); }
   66.20 +  void nmethods_do(CodeBlobClosure* cf);
   66.21  
   66.22    void gc_prologue();
   66.23    void gc_epilogue();
    67.1 --- a/src/share/vm/runtime/globals.hpp	Tue Sep 15 11:09:34 2009 -0700
    67.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Sep 15 21:53:47 2009 -0700
    67.3 @@ -714,6 +714,11 @@
    67.4    diagnostic(bool, TraceNMethodInstalls, false,                             \
    67.5               "Trace nmethod installation")                                  \
    67.6                                                                              \
    67.7 +  diagnostic(intx, ScavengeRootsInCode, 0,                                  \
    67.8 +             "0: do not allow scavengable oops in the code cache; "         \
    67.9 +             "1: allow scavenging from the code cache; "                    \
   67.10 +             "2: emit as many constants as the compiler can see")           \
   67.11 +                                                                            \
   67.12    diagnostic(bool, TraceOSRBreakpoint, false,                               \
   67.13               "Trace OSR Breakpoint ")                                       \
   67.14                                                                              \
    68.1 --- a/src/share/vm/runtime/sweeper.cpp	Tue Sep 15 11:09:34 2009 -0700
    68.2 +++ b/src/share/vm/runtime/sweeper.cpp	Tue Sep 15 21:53:47 2009 -0700
    68.3 @@ -34,6 +34,17 @@
    68.4  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
    68.5  bool      NMethodSweeper::_rescan = false;
    68.6  
    68.7 +class MarkActivationClosure: public CodeBlobClosure {
    68.8 +public:
    68.9 +  virtual void do_code_blob(CodeBlob* cb) {
   68.10 +    // If we see an activation belonging to a non_entrant nmethod, we mark it.
   68.11 +    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
   68.12 +      ((nmethod*)cb)->mark_as_seen_on_stack();
   68.13 +    }
   68.14 +  }
   68.15 +};
   68.16 +static MarkActivationClosure mark_activation_closure;
   68.17 +
   68.18  void NMethodSweeper::sweep() {
   68.19    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   68.20    if (!MethodFlushing) return;
   68.21 @@ -57,7 +68,7 @@
   68.22      if (PrintMethodFlushing) {
   68.23        tty->print_cr("### Sweep: stack traversal %d", _traversals);
   68.24      }
   68.25 -    Threads::nmethods_do();
   68.26 +    Threads::nmethods_do(&mark_activation_closure);
   68.27  
   68.28      // reset the flags since we started a scan from the beginning.
   68.29      _rescan = false;
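
The sweeper change inverts control: frame walking stays generic (Threads::nmethods_do) and the sweeper's policy moves into a closure. A self-contained sketch of that shape, with invented names:

    #include <cstdio>
    #include <vector>

    struct Nm { bool not_entrant; bool seen_on_stack; };

    struct BlobClosureSketch {
      virtual ~BlobClosureSketch() {}
      virtual void do_blob(Nm* nm) = 0;
    };

    // Models MarkActivationClosure: mark any not-entrant nmethod whose
    // activation is still live on some stack.
    struct MarkActivation : BlobClosureSketch {
      virtual void do_blob(Nm* nm) {
        if (nm->not_entrant) nm->seen_on_stack = true;
      }
    };

    // Models Threads::nmethods_do: walks whatever is active in frames and
    // hands each nmethod to the supplied closure.
    static void nmethods_do(std::vector<Nm*>& active_frames, BlobClosureSketch* cf) {
      for (size_t i = 0; i < active_frames.size(); i++) cf->do_blob(active_frames[i]);
    }

    int main() {
      Nm live  = { false, false };
      Nm dying = { true,  false };
      std::vector<Nm*> frames;
      frames.push_back(&live);
      frames.push_back(&dying);
      MarkActivation mark;
      nmethods_do(frames, &mark);
      std::printf("dying seen on stack: %d\n", (int)dying.seen_on_stack);
      return 0;
    }
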
    69.1 --- a/src/share/vm/runtime/thread.cpp	Tue Sep 15 11:09:34 2009 -0700
    69.2 +++ b/src/share/vm/runtime/thread.cpp	Tue Sep 15 21:53:47 2009 -0700
    69.3 @@ -683,14 +683,15 @@
    69.4    return false;
    69.5  }
    69.6  
    69.7 -void Thread::oops_do(OopClosure* f) {
    69.8 +void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
    69.9    active_handles()->oops_do(f);
   69.10    // Do oop for ThreadShadow
   69.11    f->do_oop((oop*)&_pending_exception);
   69.12    handle_area()->oops_do(f);
   69.13  }
   69.14  
   69.15 -void Thread::nmethods_do() {
   69.16 +void Thread::nmethods_do(CodeBlobClosure* cf) {
   69.17 +  // no nmethods in a generic thread...
   69.18  }
   69.19  
   69.20  void Thread::print_on(outputStream* st) const {
   69.21 @@ -2316,12 +2317,12 @@
   69.22  }
   69.23  
   69.24  
   69.25 -void JavaThread::oops_do(OopClosure* f) {
   69.26 +void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   69.27    // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
   69.28    // since there may be more than one thread using each ThreadProfiler.
   69.29  
   69.30    // Traverse the GCHandles
   69.31 -  Thread::oops_do(f);
   69.32 +  Thread::oops_do(f, cf);
   69.33  
   69.34    assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
   69.35            (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
   69.36 @@ -2347,7 +2348,7 @@
   69.37  
   69.38      // Traverse the execution stack
   69.39      for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
   69.40 -      fst.current()->oops_do(f, fst.register_map());
   69.41 +      fst.current()->oops_do(f, cf, fst.register_map());
   69.42      }
   69.43    }
   69.44  
   69.45 @@ -2379,9 +2380,8 @@
   69.46    }
   69.47  }
   69.48  
   69.49 -void JavaThread::nmethods_do() {
   69.50 -  // Traverse the GCHandles
   69.51 -  Thread::nmethods_do();
   69.52 +void JavaThread::nmethods_do(CodeBlobClosure* cf) {
   69.53 +  Thread::nmethods_do(cf);  // (super method is a no-op)
   69.54  
   69.55    assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
   69.56            (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
   69.57 @@ -2389,7 +2389,7 @@
   69.58    if (has_last_Java_frame()) {
   69.59      // Traverse the execution stack
   69.60      for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
   69.61 -      fst.current()->nmethods_do();
   69.62 +      fst.current()->nmethods_do(cf);
   69.63      }
   69.64    }
   69.65  }
   69.66 @@ -2463,7 +2463,7 @@
   69.67  
   69.68  void JavaThread::verify() {
   69.69    // Verify oops in the thread.
   69.70 -  oops_do(&VerifyOopClosure::verify_oop);
   69.71 +  oops_do(&VerifyOopClosure::verify_oop, NULL);
   69.72  
   69.73    // Verify the stack frames.
   69.74    frames_do(frame_verify);
   69.75 @@ -3602,14 +3602,14 @@
   69.76  // uses the Threads_lock to guarantee this property. It also makes sure that
   69.77  // all threads get blocked when exiting or starting).
   69.78  
   69.79 -void Threads::oops_do(OopClosure* f) {
   69.80 +void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   69.81    ALL_JAVA_THREADS(p) {
   69.82 -    p->oops_do(f);
   69.83 +    p->oops_do(f, cf);
   69.84    }
   69.85 -  VMThread::vm_thread()->oops_do(f);
   69.86 +  VMThread::vm_thread()->oops_do(f, cf);
   69.87  }
   69.88  
   69.89 -void Threads::possibly_parallel_oops_do(OopClosure* f) {
   69.90 +void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
   69.91    // Introduce a mechanism allowing parallel threads to claim threads as
   69.92    // root groups.  Overhead should be small enough to use all the time,
   69.93    // even in sequential code.
   69.94 @@ -3618,12 +3618,12 @@
   69.95    int cp = SharedHeap::heap()->strong_roots_parity();
   69.96    ALL_JAVA_THREADS(p) {
   69.97      if (p->claim_oops_do(is_par, cp)) {
   69.98 -      p->oops_do(f);
   69.99 +      p->oops_do(f, cf);
  69.100      }
  69.101    }
  69.102    VMThread* vmt = VMThread::vm_thread();
  69.103    if (vmt->claim_oops_do(is_par, cp))
  69.104 -    vmt->oops_do(f);
  69.105 +    vmt->oops_do(f, cf);
  69.106  }
  69.107  
  69.108  #ifndef SERIALGC
  69.109 @@ -3644,11 +3644,11 @@
  69.110  }
  69.111  #endif // SERIALGC
  69.112  
  69.113 -void Threads::nmethods_do() {
  69.114 +void Threads::nmethods_do(CodeBlobClosure* cf) {
  69.115    ALL_JAVA_THREADS(p) {
  69.116 -    p->nmethods_do();
  69.117 +    p->nmethods_do(cf);
  69.118    }
  69.119 -  VMThread::vm_thread()->nmethods_do();
  69.120 +  VMThread::vm_thread()->nmethods_do(cf);
  69.121  }
  69.122  
  69.123  void Threads::gc_epilogue() {
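
possibly_parallel_oops_do relies on the parity-claiming trick: each worker CAS-es a per-thread parity word to the current pass's parity, and only the winner scans that thread's roots. A standalone sketch (std::atomic here stands in for HotSpot's Atomic::cmpxchg):

    #include <atomic>
    #include <cstdio>

    struct ThreadSketch {
      std::atomic<int> oops_do_parity;
      ThreadSketch() : oops_do_parity(0) {}  // 0 == "never claimed"
      // Models Thread::claim_oops_do: true iff this caller won the claim.
      bool claim_oops_do(int strong_roots_parity) {
        int observed = oops_do_parity.load();
        if (observed == strong_roots_parity) return false;  // already claimed
        return oops_do_parity.compare_exchange_strong(observed, strong_roots_parity);
      }
    };

    int main() {
      ThreadSketch t;
      int parity = 1;   // advanced once per pass by StrongRootsScope
      std::printf("worker A claims: %d\n", (int)t.claim_oops_do(parity));
      std::printf("worker B claims: %d\n", (int)t.claim_oops_do(parity));
      parity = 2;       // next pass: the thread can be claimed again
      std::printf("next pass claim: %d\n", (int)t.claim_oops_do(parity));
      return 0;
    }
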
    70.1 --- a/src/share/vm/runtime/thread.hpp	Tue Sep 15 11:09:34 2009 -0700
    70.2 +++ b/src/share/vm/runtime/thread.hpp	Tue Sep 15 21:53:47 2009 -0700
    70.3 @@ -374,7 +374,8 @@
    70.4  
    70.5    // GC support
    70.6    // Apply "f->do_oop" to all root oops in "this".
    70.7 -  void oops_do(OopClosure* f);
    70.8 +  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
    70.9 +  void oops_do(OopClosure* f, CodeBlobClosure* cf);
   70.10  
   70.11    // Handles the parallel case for the method below.
   70.12  private:
   70.13 @@ -398,7 +399,7 @@
   70.14    }
   70.15  
   70.16    // Sweeper support
   70.17 -  void nmethods_do();
   70.18 +  void nmethods_do(CodeBlobClosure* cf);
   70.19  
   70.20    // Tells if adr belong to this thread. This is used
   70.21    // for checking if a lock is owned by the running thread.
   70.22 @@ -1238,10 +1239,10 @@
   70.23    void frames_do(void f(frame*, const RegisterMap*));
   70.24  
   70.25    // Memory operations
   70.26 -  void oops_do(OopClosure* f);
   70.27 +  void oops_do(OopClosure* f, CodeBlobClosure* cf);
   70.28  
   70.29    // Sweeper operations
   70.30 -  void nmethods_do();
   70.31 +  void nmethods_do(CodeBlobClosure* cf);
   70.32  
   70.33    // Memory management operations
   70.34    void gc_epilogue();
   70.35 @@ -1629,9 +1630,9 @@
   70.36  
   70.37    // Apply "f->do_oop" to all root oops in all threads.
   70.38    // This version may only be called by sequential code.
   70.39 -  static void oops_do(OopClosure* f);
   70.40 +  static void oops_do(OopClosure* f, CodeBlobClosure* cf);
   70.41    // This version may be called by sequential or parallel code.
   70.42 -  static void possibly_parallel_oops_do(OopClosure* f);
   70.43 +  static void possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf);
   70.44    // This creates a list of GCTasks, one per thread.
   70.45    static void create_thread_roots_tasks(GCTaskQueue* q);
   70.46    // This creates a list of GCTasks, one per thread, for marking objects.
   70.47 @@ -1639,13 +1640,13 @@
   70.48  
   70.49    // Apply "f->do_oop" to roots in all threads that
   70.50    // are part of compiled frames
   70.51 -  static void compiled_frame_oops_do(OopClosure* f);
   70.52 +  static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);
   70.53  
   70.54    static void convert_hcode_pointers();
   70.55    static void restore_hcode_pointers();
   70.56  
   70.57    // Sweeper
   70.58 -  static void nmethods_do();
   70.59 +  static void nmethods_do(CodeBlobClosure* cf);
   70.60  
   70.61    static void gc_epilogue();
   70.62    static void gc_prologue();
    71.1 --- a/src/share/vm/runtime/vmStructs.cpp	Tue Sep 15 11:09:34 2009 -0700
    71.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Tue Sep 15 21:53:47 2009 -0700
    71.3 @@ -549,6 +549,7 @@
    71.4    /********************************/                                                                                                 \
    71.5                                                                                                                                       \
    71.6       static_field(CodeCache,                   _heap,                                         CodeHeap*)                             \
    71.7 +     static_field(CodeCache,                   _scavenge_root_nmethods,                       nmethod*)                              \
    71.8                                                                                                                                       \
    71.9    /*******************************/                                                                                                  \
   71.10    /* CodeHeap (NOTE: incomplete) */                                                                                                  \
   71.11 @@ -618,7 +619,9 @@
   71.12       static_field(nmethod,             _zombie_instruction_size,                      int)                                   \
   71.13    nonstatic_field(nmethod,             _method,                                       methodOop)                             \
   71.14    nonstatic_field(nmethod,             _entry_bci,                                    int)                                   \
   71.15 -  nonstatic_field(nmethod,             _link,                                         nmethod*)                              \
   71.16 +  nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
   71.17 +  nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
   71.18 +  nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
   71.19    nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   71.20    nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   71.21    nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
    72.1 --- a/src/share/vm/runtime/vmThread.cpp	Tue Sep 15 11:09:34 2009 -0700
    72.2 +++ b/src/share/vm/runtime/vmThread.cpp	Tue Sep 15 21:53:47 2009 -0700
    72.3 @@ -619,8 +619,8 @@
    72.4  }
    72.5  
    72.6  
    72.7 -void VMThread::oops_do(OopClosure* f) {
    72.8 -  Thread::oops_do(f);
    72.9 +void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
   72.10 +  Thread::oops_do(f, cf);
   72.11    _vm_queue->oops_do(f);
   72.12  }
   72.13  
   72.14 @@ -652,5 +652,5 @@
   72.15  #endif
   72.16  
   72.17  void VMThread::verify() {
   72.18 -  oops_do(&VerifyOopClosure::verify_oop);
   72.19 +  oops_do(&VerifyOopClosure::verify_oop, NULL);
   72.20  }
    73.1 --- a/src/share/vm/runtime/vmThread.hpp	Tue Sep 15 11:09:34 2009 -0700
    73.2 +++ b/src/share/vm/runtime/vmThread.hpp	Tue Sep 15 21:53:47 2009 -0700
    73.3 @@ -121,7 +121,7 @@
    73.4    static VMThread* vm_thread()                    { return _vm_thread; }
    73.5  
    73.6    // GC support
    73.7 -  void oops_do(OopClosure* f);
    73.8 +  void oops_do(OopClosure* f, CodeBlobClosure* cf);
    73.9  
   73.10    // Debugging
   73.11    void print_on(outputStream* st) const;
    74.1 --- a/src/share/vm/utilities/debug.cpp	Tue Sep 15 11:09:34 2009 -0700
    74.2 +++ b/src/share/vm/utilities/debug.cpp	Tue Sep 15 21:53:47 2009 -0700
    74.3 @@ -702,11 +702,14 @@
    74.4    tty->print_cr("Searching strong roots:");
    74.5    Universe::oops_do(&lookFor, false);
    74.6    JNIHandles::oops_do(&lookFor);   // Global (strong) JNI handles
    74.7 -  Threads::oops_do(&lookFor);
    74.8 +  Threads::oops_do(&lookFor, NULL);
    74.9    ObjectSynchronizer::oops_do(&lookFor);
   74.10    //FlatProfiler::oops_do(&lookFor);
   74.11    SystemDictionary::oops_do(&lookFor);
   74.12  
   74.13 +  tty->print_cr("Searching code cache:");
   74.14 +  CodeCache::oops_do(&lookFor);
   74.15 +
   74.16    tty->print_cr("Done.");
   74.17  }
   74.18  
