Merge

Mon, 24 May 2010 14:15:14 -0700

author
jrose
date
Mon, 24 May 2010 14:15:14 -0700
changeset 1921
9f669cf29cb0
parent 1905
c9a07413e82b
parent 1920
ab102d5d923e
child 1922
110501f54a99

Merge

src/cpu/sparc/vm/assembler_sparc.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Thu May 20 08:32:11 2010 -0700
     1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Mon May 24 14:15:14 2010 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -42,8 +42,6 @@
    1.11    private static CIntegerField instructionsOffsetField;
    1.12    private static CIntegerField frameCompleteOffsetField;
    1.13    private static CIntegerField dataOffsetField;
    1.14 -  private static CIntegerField oopsOffsetField;
    1.15 -  private static CIntegerField oopsLengthField;
    1.16    private static CIntegerField frameSizeField;
    1.17    private static AddressField  oopMapsField;
    1.18  
    1.19 @@ -72,8 +70,6 @@
    1.20      frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
    1.21      instructionsOffsetField  = type.getCIntegerField("_instructions_offset");
    1.22      dataOffsetField          = type.getCIntegerField("_data_offset");
    1.23 -    oopsOffsetField          = type.getCIntegerField("_oops_offset");
    1.24 -    oopsLengthField          = type.getCIntegerField("_oops_length");
    1.25      frameSizeField           = type.getCIntegerField("_frame_size");
    1.26      oopMapsField             = type.getAddressField("_oop_maps");
    1.27  
    1.28 @@ -131,19 +127,10 @@
    1.29      return headerBegin().addOffsetTo(sizeField.getValue(addr));
    1.30    }
    1.31  
    1.32 -  public Address oopsBegin() {
    1.33 -    return headerBegin().addOffsetTo(oopsOffsetField.getValue(addr));
    1.34 -  }
    1.35 -
    1.36 -  public Address oopsEnd() {
    1.37 -    return oopsBegin().addOffsetTo(getOopsLength());
    1.38 -  }
    1.39 -
    1.40    // Offsets
    1.41    public int getRelocationOffset()   { return (int) headerSizeField.getValue(addr);         }
    1.42    public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
    1.43    public int getDataOffset()         { return (int) dataOffsetField.getValue(addr);         }
    1.44 -  public int getOopsOffset()         { return (int) oopsOffsetField.getValue(addr);         }
    1.45  
    1.46    // Sizes
    1.47    public int getSize()             { return (int) sizeField.getValue(addr);                     }
    1.48 @@ -157,19 +144,9 @@
    1.49    // FIXME: add relocationContains
    1.50    public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
    1.51    public boolean dataContains(Address addr)         { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr);                 }
    1.52 -  public boolean oopsContains(Address addr)         { return oopsBegin().lessThanOrEqual(addr) && oopsEnd().greaterThan(addr);                 }
    1.53    public boolean contains(Address addr)             { return instructionsContains(addr);                                                       }
    1.54    public boolean isFrameCompleteAt(Address a)       { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
    1.55  
    1.56 -  /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
    1.57 -  public OopHandle getOopAt(int index) {
    1.58 -    if (index == 0) return null;
    1.59 -    if (Assert.ASSERTS_ENABLED) {
    1.60 -      Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
    1.61 -    }
    1.62 -    return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
    1.63 -  }
    1.64 -
    1.65    // Reclamation support (really only used by the nmethods, but in order to get asserts to work
    1.66    // in the CodeCache they are defined virtual here)
    1.67    public boolean isZombie()             { return false; }
    1.68 @@ -223,18 +200,8 @@
    1.69    }
    1.70  
    1.71    protected void printComponentsOn(PrintStream tty) {
    1.72 -    // FIXME: add relocation information
    1.73      tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
    1.74                  " data: [" + dataBegin() + ", " + dataEnd() + "), " +
    1.75 -                " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
    1.76                  " frame size: " + getFrameSize());
    1.77    }
    1.78 -
    1.79 -  //--------------------------------------------------------------------------------
    1.80 -  // Internals only below this point
    1.81 -  //
    1.82 -
    1.83 -  private int getOopsLength() {
    1.84 -    return (int) oopsLengthField.getValue(addr);
    1.85 -  }
    1.86  }
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Thu May 20 08:32:11 2010 -0700
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Mon May 24 14:15:14 2010 -0700
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
     2.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -49,6 +49,7 @@
    2.11    private static CIntegerField deoptOffsetField;
    2.12    private static CIntegerField origPCOffsetField;
    2.13    private static CIntegerField stubOffsetField;
    2.14 +  private static CIntegerField oopsOffsetField;
    2.15    private static CIntegerField scopesDataOffsetField;
    2.16    private static CIntegerField scopesPCsOffsetField;
    2.17    private static CIntegerField dependenciesOffsetField;
    2.18 @@ -98,6 +99,7 @@
    2.19      deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
    2.20      origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
    2.21      stubOffsetField             = type.getCIntegerField("_stub_offset");
    2.22 +    oopsOffsetField             = type.getCIntegerField("_oops_offset");
    2.23      scopesDataOffsetField       = type.getCIntegerField("_scopes_data_offset");
    2.24      scopesPCsOffsetField        = type.getCIntegerField("_scopes_pcs_offset");
    2.25      dependenciesOffsetField     = type.getCIntegerField("_dependencies_offset");
    2.26 @@ -141,7 +143,9 @@
    2.27    public Address exceptionBegin()       { return headerBegin().addOffsetTo(getExceptionOffset());    }
    2.28    public Address deoptBegin()           { return headerBegin().addOffsetTo(getDeoptOffset());        }
    2.29    public Address stubBegin()            { return headerBegin().addOffsetTo(getStubOffset());         }
    2.30 -  public Address stubEnd()              { return headerBegin().addOffsetTo(getScopesDataOffset());   }
    2.31 +  public Address stubEnd()              { return headerBegin().addOffsetTo(getOopsOffset());         }
    2.32 +  public Address oopsBegin()            { return headerBegin().addOffsetTo(getOopsOffset());         }
    2.33 +  public Address oopsEnd()              { return headerBegin().addOffsetTo(getScopesDataOffset());   }
    2.34    public Address scopesDataBegin()      { return headerBegin().addOffsetTo(getScopesDataOffset());   }
    2.35    public Address scopesDataEnd()        { return headerBegin().addOffsetTo(getScopesPCsOffset());    }
    2.36    public Address scopesPCsBegin()       { return headerBegin().addOffsetTo(getScopesPCsOffset());    }
    2.37 @@ -156,6 +160,7 @@
    2.38    public int constantsSize()            { return (int) constantsEnd()   .minus(constantsBegin());    }
    2.39    public int codeSize()                 { return (int) codeEnd()        .minus(codeBegin());         }
    2.40    public int stubSize()                 { return (int) stubEnd()        .minus(stubBegin());         }
    2.41 +  public int oopsSize()                 { return (int) oopsEnd()        .minus(oopsBegin());         }
    2.42    public int scopesDataSize()           { return (int) scopesDataEnd()  .minus(scopesDataBegin());   }
    2.43    public int scopesPCsSize()            { return (int) scopesPCsEnd()   .minus(scopesPCsBegin());    }
    2.44    public int dependenciesSize()         { return (int) dependenciesEnd().minus(dependenciesBegin()); }
    2.45 @@ -178,6 +183,7 @@
    2.46    public boolean constantsContains   (Address addr) { return constantsBegin()   .lessThanOrEqual(addr) && constantsEnd()   .greaterThan(addr); }
    2.47    public boolean codeContains        (Address addr) { return codeBegin()        .lessThanOrEqual(addr) && codeEnd()        .greaterThan(addr); }
    2.48    public boolean stubContains        (Address addr) { return stubBegin()        .lessThanOrEqual(addr) && stubEnd()        .greaterThan(addr); }
    2.49 +  public boolean oopsContains        (Address addr) { return oopsBegin()        .lessThanOrEqual(addr) && oopsEnd()        .greaterThan(addr); }
    2.50    public boolean scopesDataContains  (Address addr) { return scopesDataBegin()  .lessThanOrEqual(addr) && scopesDataEnd()  .greaterThan(addr); }
    2.51    public boolean scopesPCsContains   (Address addr) { return scopesPCsBegin()   .lessThanOrEqual(addr) && scopesPCsEnd()   .greaterThan(addr); }
    2.52    public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
    2.53 @@ -187,6 +193,15 @@
    2.54    public Address getEntryPoint()         { return entryPointField.getValue(addr);         }
    2.55    public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
    2.56  
    2.57 +  /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
    2.58 +  public OopHandle getOopAt(int index) {
    2.59 +    if (index == 0) return null;
    2.60 +    if (Assert.ASSERTS_ENABLED) {
    2.61 +      Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
    2.62 +    }
    2.63 +    return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
    2.64 +  }
    2.65 +
    2.66    // FIXME: add interpreter_entry_point()
    2.67    // FIXME: add lazy_interpreter_entry_point() for C2
    2.68  
    2.69 @@ -338,6 +353,14 @@
    2.70      printOn(System.out);
    2.71    }
    2.72  
    2.73 +  protected void printComponentsOn(PrintStream tty) {
    2.74 +    // FIXME: add relocation information
    2.75 +    tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
    2.76 +                " data: [" + dataBegin() + ", " + dataEnd() + "), " +
    2.77 +                " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
    2.78 +                " frame size: " + getFrameSize());
    2.79 +  }
    2.80 +
    2.81    public String toString() {
    2.82      Method method = getMethod();
    2.83      return "NMethod for " +
    2.84 @@ -367,6 +390,7 @@
    2.85    private int getExceptionOffset()    { return (int) exceptionOffsetField   .getValue(addr); }
    2.86    private int getDeoptOffset()        { return (int) deoptOffsetField       .getValue(addr); }
    2.87    private int getStubOffset()         { return (int) stubOffsetField        .getValue(addr); }
    2.88 +  private int getOopsOffset()         { return (int) oopsOffsetField        .getValue(addr); }
    2.89    private int getScopesDataOffset()   { return (int) scopesDataOffsetField  .getValue(addr); }
    2.90    private int getScopesPCsOffset()    { return (int) scopesPCsOffsetField   .getValue(addr); }
    2.91    private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Thu May 20 08:32:11 2010 -0700
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Mon May 24 14:15:14 2010 -0700
     3.3 @@ -1,5 +1,5 @@
     3.4  /*
     3.5 - * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
     3.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
     3.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8   *
     3.9   * This code is free software; you can redistribute it and/or modify it
    3.10 @@ -98,7 +98,12 @@
    3.11          }
    3.12          loc.inBlobInstructions = loc.blob.instructionsContains(a);
    3.13          loc.inBlobData         = loc.blob.dataContains(a);
    3.14 -        loc.inBlobOops         = loc.blob.oopsContains(a);
    3.15 +
    3.16 +        if (loc.blob.isNMethod()) {
    3.17 +            NMethod nm = (NMethod) loc.blob;
    3.18 +            loc.inBlobOops = nm.oopsContains(a);
    3.19 +        }
    3.20 +
    3.21          loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
    3.22                                         loc.inBlobData ||
    3.23                                         loc.inBlobOops));
     4.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp	Thu May 20 08:32:11 2010 -0700
     4.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Mon May 24 14:15:14 2010 -0700
     4.3 @@ -87,6 +87,7 @@
     4.4  // JSR 292 fixed register usages:
     4.5  REGISTER_DECLARATION(Register, G5_method_type        , G5);
     4.6  REGISTER_DECLARATION(Register, G3_method_handle      , G3);
     4.7 +REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);
     4.8  
     4.9  // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
    4.10  // because a single patchable "set" instruction (NativeMovConstReg,
     5.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Thu May 20 08:32:11 2010 -0700
     5.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Mon May 24 14:15:14 2010 -0700
     5.3 @@ -1,5 +1,5 @@
     5.4  /*
     5.5 - * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
     5.6 + * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
     5.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.8   *
     5.9   * This code is free software; you can redistribute it and/or modify it
    5.10 @@ -345,6 +345,13 @@
    5.11  }
    5.12  
    5.13  
    5.14 +// JSR 292
    5.15 +LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
    5.16 +  assert(L7 == L7_mh_SP_save, "must be same register");
    5.17 +  return L7_opr;
    5.18 +}
    5.19 +
    5.20 +
    5.21  bool FrameMap::validate_frame() {
    5.22    int max_offset = in_bytes(framesize_in_bytes());
    5.23    int java_index = 0;
     6.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Thu May 20 08:32:11 2010 -0700
     6.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Mon May 24 14:15:14 2010 -0700
     6.3 @@ -143,6 +143,3 @@
     6.4  
     6.5    static bool is_caller_save_register (LIR_Opr  reg);
     6.6    static bool is_caller_save_register (Register r);
     6.7 -
     6.8 -  // JSR 292
     6.9 -  static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }
     7.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu May 20 08:32:11 2010 -0700
     7.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon May 24 14:15:14 2010 -0700
     7.3 @@ -736,7 +736,8 @@
     7.4  
     7.5  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
     7.6    __ call(op->addr(), rtype);
     7.7 -  // the peephole pass fills the delay slot
     7.8 +  // The peephole pass fills the delay slot, add_call_info is done in
     7.9 +  // LIR_Assembler::emit_delay.
    7.10  }
    7.11  
    7.12  
    7.13 @@ -745,7 +746,8 @@
    7.14    __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
    7.15    __ relocate(rspec);
    7.16    __ call(op->addr(), relocInfo::none);
    7.17 -  // the peephole pass fills the delay slot
    7.18 +  // The peephole pass fills the delay slot, add_call_info is done in
    7.19 +  // LIR_Assembler::emit_delay.
    7.20  }
    7.21  
    7.22  
    7.23 @@ -766,16 +768,6 @@
    7.24  }
    7.25  
    7.26  
    7.27 -void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
    7.28 -  Unimplemented();
    7.29 -}
    7.30 -
    7.31 -
    7.32 -void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
    7.33 -  Unimplemented();
    7.34 -}
    7.35 -
    7.36 -
    7.37  // load with 32-bit displacement
    7.38  int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
    7.39    int load_offset = code_offset();
    7.40 @@ -2934,7 +2926,7 @@
    7.41  
    7.42    // we may also be emitting the call info for the instruction
    7.43    // which we are the delay slot of.
    7.44 -  CodeEmitInfo * call_info = op->call_info();
    7.45 +  CodeEmitInfo* call_info = op->call_info();
    7.46    if (call_info) {
    7.47      add_call_info(code_offset(), call_info);
    7.48    }
    7.49 @@ -3159,6 +3151,7 @@
    7.50                tty->print_cr("delayed");
    7.51                inst->at(i - 1)->print();
    7.52                inst->at(i)->print();
    7.53 +              tty->cr();
    7.54              }
    7.55  #endif
    7.56              continue;
    7.57 @@ -3174,8 +3167,8 @@
    7.58        case lir_static_call:
    7.59        case lir_virtual_call:
    7.60        case lir_icvirtual_call:
    7.61 -      case lir_optvirtual_call: {
    7.62 -        LIR_Op* delay_op = NULL;
    7.63 +      case lir_optvirtual_call:
    7.64 +      case lir_dynamic_call: {
    7.65          LIR_Op* prev = inst->at(i - 1);
    7.66          if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
    7.67              (op->code() != lir_virtual_call ||
    7.68 @@ -3192,15 +3185,14 @@
    7.69              tty->print_cr("delayed");
    7.70              inst->at(i - 1)->print();
    7.71              inst->at(i)->print();
    7.72 +            tty->cr();
    7.73            }
    7.74  #endif
    7.75            continue;
    7.76          }
    7.77  
    7.78 -        if (!delay_op) {
    7.79 -          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
    7.80 -          inst->insert_before(i + 1, delay_op);
    7.81 -        }
    7.82 +        LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
    7.83 +        inst->insert_before(i + 1, delay_op);
    7.84          break;
    7.85        }
    7.86      }
     8.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu May 20 08:32:11 2010 -0700
     8.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon May 24 14:15:14 2010 -0700
     8.3 @@ -679,8 +679,15 @@
     8.4          __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
     8.5                          G2_thread, Oissuing_pc->after_save());
     8.6          __ verify_not_null_oop(Oexception->after_save());
     8.7 -        __ jmp(O0, 0);
     8.8 -        __ delayed()->restore();
     8.9 +
    8.10 +        // Restore SP from L7 if the exception PC is a MethodHandle call site.
    8.11 +        __ mov(O0, G5);  // Save the target address.
    8.12 +        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
    8.13 +        __ tst(L0);  // Condition codes are preserved over the restore.
    8.14 +        __ restore();
    8.15 +
    8.16 +        __ jmp(G5, 0);
    8.17 +        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
    8.18        }
    8.19        break;
    8.20  
     9.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp	Thu May 20 08:32:11 2010 -0700
     9.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp	Mon May 24 14:15:14 2010 -0700
     9.3 @@ -336,9 +336,11 @@
     9.4  #endif // ASSERT
     9.5  }
     9.6  
     9.7 -frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) {
     9.8 -  _sp = sp;
     9.9 -  _younger_sp = younger_sp;
    9.10 +frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
    9.11 +  _sp(sp),
    9.12 +  _younger_sp(younger_sp),
    9.13 +  _deopt_state(unknown),
    9.14 +  _sp_adjustment_by_callee(0) {
    9.15    if (younger_sp == NULL) {
    9.16      // make a deficient frame which doesn't know where its PC is
    9.17      _pc = NULL;
    9.18 @@ -352,20 +354,32 @@
    9.19      // wrong.  (the _last_native_pc will have the right value)
    9.20      // So do not put add any asserts on the _pc here.
    9.21    }
    9.22 -  if (younger_frame_adjusted_stack) {
    9.23 -    // compute adjustment to this frame's SP made by its interpreted callee
    9.24 -    _sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] +
    9.25 -                                             STACK_BIAS) - sp;
    9.26 -  } else {
    9.27 -    _sp_adjustment_by_callee = 0;
    9.28 +
    9.29 +  if (_pc != NULL)
    9.30 +    _cb = CodeCache::find_blob(_pc);
    9.31 +
    9.32 +  // Check for MethodHandle call sites.
    9.33 +  if (_cb != NULL) {
    9.34 +    nmethod* nm = _cb->as_nmethod_or_null();
    9.35 +    if (nm != NULL) {
    9.36 +      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
    9.37 +        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
    9.38 +        // The SP is already adjusted by this MH call site, don't
    9.39 +        // overwrite this value with the wrong interpreter value.
    9.40 +        younger_frame_is_interpreted = false;
    9.41 +      }
    9.42 +    }
    9.43    }
    9.44  
    9.45 -  _deopt_state = unknown;
    9.46 +  if (younger_frame_is_interpreted) {
    9.47 +    // compute adjustment to this frame's SP made by its interpreted callee
    9.48 +    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
    9.49 +  }
    9.50  
    9.51 -  // It is important that frame be fully construct when we do this lookup
    9.52 -  // as get_original_pc() needs correct value for unextended_sp()
    9.53 +  // It is important that the frame is fully constructed when we do
    9.54 +  // this lookup as get_deopt_original_pc() needs a correct value for
    9.55 +  // unextended_sp() which uses _sp_adjustment_by_callee.
    9.56    if (_pc != NULL) {
    9.57 -    _cb = CodeCache::find_blob(_pc);
    9.58      address original_pc = nmethod::get_deopt_original_pc(this);
    9.59      if (original_pc != NULL) {
    9.60        _pc = original_pc;
    9.61 @@ -462,9 +476,8 @@
    9.62  
    9.63    if (is_entry_frame()) return sender_for_entry_frame(map);
    9.64  
    9.65 -  intptr_t* younger_sp     = sp();
    9.66 -  intptr_t* sp             = sender_sp();
    9.67 -  bool      adjusted_stack = false;
    9.68 +  intptr_t* younger_sp = sp();
    9.69 +  intptr_t* sp         = sender_sp();
    9.70  
    9.71    // Note:  The version of this operation on any platform with callee-save
    9.72    //        registers must update the register map (if not null).
    9.73 @@ -483,8 +496,8 @@
    9.74    // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
    9.75    // explicitly recognized.
    9.76  
    9.77 -  adjusted_stack = is_interpreted_frame();
    9.78 -  if (adjusted_stack) {
    9.79 +  bool frame_is_interpreted = is_interpreted_frame();
    9.80 +  if (frame_is_interpreted) {
    9.81      map->make_integer_regs_unsaved();
    9.82      map->shift_window(sp, younger_sp);
    9.83    } else if (_cb != NULL) {
    9.84 @@ -503,7 +516,7 @@
    9.85        }
    9.86      }
    9.87    }
    9.88 -  return frame(sp, younger_sp, adjusted_stack);
    9.89 +  return frame(sp, younger_sp, frame_is_interpreted);
    9.90  }
    9.91  
    9.92  
    10.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    10.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    10.3 @@ -720,25 +720,30 @@
    10.4  
    10.5  
    10.6  void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
    10.7 -                                                       int bcp_offset, bool giant_index) {
    10.8 +                                                       int bcp_offset, size_t index_size) {
    10.9    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   10.10 -  if (!giant_index) {
   10.11 +  if (index_size == sizeof(u2)) {
   10.12      get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
   10.13 -  } else {
   10.14 +  } else if (index_size == sizeof(u4)) {
   10.15      assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
   10.16      get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
   10.17      assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
   10.18      xor3(tmp, -1, tmp);  // convert to plain index
   10.19 +  } else if (index_size == sizeof(u1)) {
   10.20 +    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
   10.21 +    ldub(Lbcp, bcp_offset, tmp);
   10.22 +  } else {
   10.23 +    ShouldNotReachHere();
   10.24    }
   10.25  }
   10.26  
   10.27  
   10.28  void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
   10.29 -                                                           int bcp_offset, bool giant_index) {
   10.30 +                                                           int bcp_offset, size_t index_size) {
   10.31    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   10.32    assert_different_registers(cache, tmp);
   10.33    assert_not_delayed();
   10.34 -  get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
   10.35 +  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
   10.36    // convert from field index to ConstantPoolCacheEntry index and from
   10.37    // word index to byte offset
   10.38    sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
   10.39 @@ -747,12 +752,15 @@
   10.40  
   10.41  
   10.42  void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
   10.43 -                                                               int bcp_offset, bool giant_index) {
   10.44 +                                                               int bcp_offset, size_t index_size) {
   10.45    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   10.46    assert_different_registers(cache, tmp);
   10.47    assert_not_delayed();
   10.48 -  assert(!giant_index,"NYI");
   10.49 -  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
   10.50 +  if (index_size == sizeof(u2)) {
   10.51 +    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
   10.52 +  } else {
   10.53 +    ShouldNotReachHere();  // other sizes not supported here
   10.54 +  }
   10.55                // convert from field index to ConstantPoolCacheEntry index
   10.56                // and from word index to byte offset
   10.57    sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
    11.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu May 20 08:32:11 2010 -0700
    11.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Mon May 24 14:15:14 2010 -0700
    11.3 @@ -182,9 +182,9 @@
    11.4                                    Register   Rdst,
    11.5                                    setCCOrNot should_set_CC = dont_set_CC );
    11.6  
    11.7 -  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    11.8 -  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    11.9 -  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
   11.10 +  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   11.11 +  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   11.12 +  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   11.13  
   11.14  
   11.15    // common code
    12.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    12.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    12.3 @@ -375,10 +375,10 @@
    12.4        Register O0_scratch = O0_argslot;
    12.5        int stackElementSize = Interpreter::stackElementSize;
    12.6  
    12.7 -      // Make space on the stack for the arguments.
    12.8 -      __ sub(SP,    4*stackElementSize, SP);
    12.9 -      __ sub(Gargs, 3*stackElementSize, Gargs);
   12.10 -      //__ sub(Lesp,  3*stackElementSize, Lesp);
   12.11 +      // Make space on the stack for the arguments and set Gargs
   12.12 +      // correctly.
   12.13 +      __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
   12.14 +      __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
   12.15  
   12.16        // void raiseException(int code, Object actual, Object required)
   12.17        __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
    13.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    13.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    13.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -321,7 +321,8 @@
   13.11    set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));
   13.12  
   13.13    // also store the value into an oop_Relocation cell, if any
   13.14 -  CodeBlob* nm = CodeCache::find_blob(instruction_address());
   13.15 +  CodeBlob* cb = CodeCache::find_blob(instruction_address());
   13.16 +  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
   13.17    if (nm != NULL) {
   13.18      RelocIterator iter(nm, instruction_address(), next_instruction_address());
   13.19      oop* oop_addr = NULL;
   13.20 @@ -430,7 +431,8 @@
   13.21    set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
   13.22  
   13.23    // also store the value into an oop_Relocation cell, if any
   13.24 -  CodeBlob* nm = CodeCache::find_blob(instruction_address());
   13.25 +  CodeBlob* cb = CodeCache::find_blob(instruction_address());
   13.26 +  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
   13.27    if (nm != NULL) {
   13.28      RelocIterator iter(nm, instruction_address(), next_instruction_address());
   13.29      oop* oop_addr = NULL;
    14.1 --- a/src/cpu/sparc/vm/register_definitions_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    14.2 +++ b/src/cpu/sparc/vm/register_definitions_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
    14.6 + * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -142,9 +142,12 @@
   14.11  REGISTER_DEFINITION(Register, G3_scratch);
   14.12  REGISTER_DEFINITION(Register, G4_scratch);
   14.13  REGISTER_DEFINITION(Register, Gtemp);
   14.14 +REGISTER_DEFINITION(Register, Lentry_args);
   14.15 +
   14.16 +// JSR 292
   14.17  REGISTER_DEFINITION(Register, G5_method_type);
   14.18  REGISTER_DEFINITION(Register, G3_method_handle);
   14.19 -REGISTER_DEFINITION(Register, Lentry_args);
   14.20 +REGISTER_DEFINITION(Register, L7_mh_SP_save);
   14.21  
   14.22  #ifdef CC_INTERP
   14.23  REGISTER_DEFINITION(Register, Lstate);
    15.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    15.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    15.3 @@ -908,26 +908,13 @@
    15.4    // O0-O5          - Outgoing args in compiled layout
    15.5    // O6             - Adjusted or restored SP
    15.6    // O7             - Valid return address
    15.7 -  // L0-L7, I0-I7    - Caller's temps (no frame pushed yet)
    15.8 +  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
    15.9    // F0-F7          - more outgoing args
   15.10  
   15.11  
   15.12    // Gargs is the incoming argument base, and also an outgoing argument.
   15.13    __ sub(Gargs, BytesPerWord, Gargs);
   15.14  
   15.15 -#ifdef ASSERT
   15.16 -  {
   15.17 -    // on entry OsavedSP and SP should be equal
   15.18 -    Label ok;
   15.19 -    __ cmp(O5_savedSP, SP);
   15.20 -    __ br(Assembler::equal, false, Assembler::pt, ok);
   15.21 -    __ delayed()->nop();
   15.22 -    __ stop("I5_savedSP not set");
   15.23 -    __ should_not_reach_here();
   15.24 -    __ bind(ok);
   15.25 -  }
   15.26 -#endif
   15.27 -
   15.28    // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
   15.29    // WITH O7 HOLDING A VALID RETURN PC
   15.30    //
    16.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    16.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    16.3 @@ -2911,16 +2911,6 @@
    16.4      // arraycopy stubs used by compilers
    16.5      generate_arraycopy_stubs();
    16.6  
    16.7 -    // generic method handle stubs
    16.8 -    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
    16.9 -      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
   16.10 -           ek < MethodHandles::_EK_LIMIT;
   16.11 -           ek = MethodHandles::EntryKind(1 + (int)ek)) {
   16.12 -        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
   16.13 -        MethodHandles::generate_method_handle_stub(_masm, ek);
   16.14 -      }
   16.15 -    }
   16.16 -
   16.17      // Don't initialize the platform math functions since sparc
   16.18      // doesn't have intrinsics for these operations.
   16.19    }
    17.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu May 20 08:32:11 2010 -0700
    17.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon May 24 14:15:14 2010 -0700
    17.3 @@ -43,7 +43,7 @@
    17.4  
    17.5  // MethodHandles adapters
    17.6  enum method_handles_platform_dependent_constants {
    17.7 -  method_handles_adapters_code_size = 5000
    17.8 +  method_handles_adapters_code_size = 6000
    17.9  };
   17.10  
   17.11  class Sparc {
    18.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    18.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    18.3 @@ -204,7 +204,7 @@
    18.4    // out of the main line of code...
    18.5    if (EnableInvokeDynamic) {
    18.6      __ bind(L_giant_index);
    18.7 -    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
    18.8 +    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    18.9      __ ba(false, L_got_cache);
   18.10      __ delayed()->nop();
   18.11    }
    19.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu May 20 08:32:11 2010 -0700
    19.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon May 24 14:15:14 2010 -0700
    19.3 @@ -1949,23 +1949,30 @@
    19.4  }
    19.5  
    19.6  // ----------------------------------------------------------------------------
    19.7 -void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
    19.8 -  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
    19.9 -  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
   19.10 -
   19.11 +void TemplateTable::resolve_cache_and_index(int byte_no,
   19.12 +                                            Register result,
   19.13 +                                            Register Rcache,
   19.14 +                                            Register index,
   19.15 +                                            size_t index_size) {
   19.16    // Depends on cpCacheOop layout!
   19.17 -  const int shift_count = (1 + byte_no)*BitsPerByte;
   19.18    Label resolved;
   19.19  
   19.20 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   19.21 -  if (is_invokedynamic) {
   19.22 -    // We are resolved if the f1 field contains a non-null CallSite object.
   19.23 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   19.24 +  if (byte_no == f1_oop) {
   19.25 +    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
   19.26 +    // This kind of CP cache entry does not need to match the flags byte, because
   19.27 +    // there is a 1-1 relation between bytecode type and CP entry type.
   19.28 +    assert_different_registers(result, Rcache);
   19.29      __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   19.30 -              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
   19.31 -    __ tst(Lbyte_code);
   19.32 +              ConstantPoolCacheEntry::f1_offset(), result);
   19.33 +    __ tst(result);
   19.34      __ br(Assembler::notEqual, false, Assembler::pt, resolved);
   19.35      __ delayed()->set((int)bytecode(), O1);
   19.36    } else {
   19.37 +    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   19.38 +    assert(result == noreg, "");  //else change code for setting result
   19.39 +    const int shift_count = (1 + byte_no)*BitsPerByte;
   19.40 +
   19.41      __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   19.42                ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
   19.43  
   19.44 @@ -1992,7 +1999,10 @@
   19.45    // first time invocation - must resolve first
   19.46    __ call_VM(noreg, entry, O1);
   19.47    // Update registers with resolved info
   19.48 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   19.49 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   19.50 +  if (result != noreg)
   19.51 +    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   19.52 +              ConstantPoolCacheEntry::f1_offset(), result);
   19.53    __ bind(resolved);
   19.54  }
   19.55  
   19.56 @@ -2001,7 +2011,8 @@
   19.57                                                 Register Ritable_index,
   19.58                                                 Register Rflags,
   19.59                                                 bool is_invokevirtual,
   19.60 -                                               bool is_invokevfinal) {
   19.61 +                                               bool is_invokevfinal,
   19.62 +                                               bool is_invokedynamic) {
   19.63    // Uses both G3_scratch and G4_scratch
   19.64    Register Rcache = G3_scratch;
   19.65    Register Rscratch = G4_scratch;
   19.66 @@ -2025,11 +2036,15 @@
   19.67  
   19.68    if (is_invokevfinal) {
   19.69      __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
   19.70 +    __ ld_ptr(Rcache, method_offset, Rmethod);
   19.71 +  } else if (byte_no == f1_oop) {
   19.72 +    // Resolved f1_oop goes directly into 'method' register.
   19.73 +    resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
   19.74    } else {
   19.75 -    resolve_cache_and_index(byte_no, Rcache, Rscratch);
   19.76 +    resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
   19.77 +    __ ld_ptr(Rcache, method_offset, Rmethod);
   19.78    }
   19.79  
   19.80 -  __ ld_ptr(Rcache, method_offset, Rmethod);
   19.81    if (Ritable_index != noreg) {
   19.82      __ ld_ptr(Rcache, index_offset, Ritable_index);
   19.83    }
   19.84 @@ -2110,7 +2125,7 @@
   19.85    Register Rflags = G1_scratch;
   19.86    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
   19.87  
   19.88 -  resolve_cache_and_index(byte_no, Rcache, index);
   19.89 +  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
   19.90    jvmti_post_field_access(Rcache, index, is_static, false);
   19.91    load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
   19.92  
   19.93 @@ -2475,7 +2490,7 @@
   19.94    Register Rflags = G1_scratch;
   19.95    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
   19.96  
   19.97 -  resolve_cache_and_index(byte_no, Rcache, index);
   19.98 +  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
   19.99    jvmti_post_field_mod(Rcache, index, is_static);
  19.100    load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
  19.101  
  19.102 @@ -2816,6 +2831,7 @@
  19.103  
  19.104  void TemplateTable::invokevirtual(int byte_no) {
  19.105    transition(vtos, vtos);
  19.106 +  assert(byte_no == f2_byte, "use this argument");
  19.107  
  19.108    Register Rscratch = G3_scratch;
  19.109    Register Rtemp = G4_scratch;
  19.110 @@ -2823,7 +2839,7 @@
  19.111    Register Rrecv = G5_method;
  19.112    Label notFinal;
  19.113  
  19.114 -  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
  19.115 +  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  19.116    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  19.117  
  19.118    // Check for vfinal
  19.119 @@ -2864,9 +2880,10 @@
  19.120  
  19.121  void TemplateTable::fast_invokevfinal(int byte_no) {
  19.122    transition(vtos, vtos);
  19.123 +  assert(byte_no == f2_byte, "use this argument");
  19.124  
  19.125    load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
  19.126 -                             /*is_invokevfinal*/true);
  19.127 +                             /*is_invokevfinal*/true, false);
  19.128    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  19.129    invokevfinal_helper(G3_scratch, Lscratch);
  19.130  }
  19.131 @@ -2901,12 +2918,13 @@
  19.132  
  19.133  void TemplateTable::invokespecial(int byte_no) {
  19.134    transition(vtos, vtos);
  19.135 +  assert(byte_no == f1_byte, "use this argument");
  19.136  
  19.137    Register Rscratch = G3_scratch;
  19.138    Register Rtemp = G4_scratch;
  19.139    Register Rret = Lscratch;
  19.140  
  19.141 -  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
  19.142 +  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  19.143    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  19.144  
  19.145    __ verify_oop(G5_method);
  19.146 @@ -2934,12 +2952,13 @@
  19.147  
  19.148  void TemplateTable::invokestatic(int byte_no) {
  19.149    transition(vtos, vtos);
  19.150 +  assert(byte_no == f1_byte, "use this argument");
  19.151  
  19.152    Register Rscratch = G3_scratch;
  19.153    Register Rtemp = G4_scratch;
  19.154    Register Rret = Lscratch;
  19.155  
  19.156 -  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
  19.157 +  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  19.158    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  19.159  
  19.160    __ verify_oop(G5_method);
  19.161 @@ -2992,6 +3011,7 @@
  19.162  
  19.163  void TemplateTable::invokeinterface(int byte_no) {
  19.164    transition(vtos, vtos);
  19.165 +  assert(byte_no == f1_byte, "use this argument");
  19.166  
  19.167    Register Rscratch = G4_scratch;
  19.168    Register Rret = G3_scratch;
  19.169 @@ -3001,7 +3021,7 @@
  19.170    Register Rflags = O1;
  19.171    assert_different_registers(Rscratch, G5_method);
  19.172  
  19.173 -  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
  19.174 +  load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
  19.175    __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  19.176  
  19.177    // get receiver
  19.178 @@ -3118,6 +3138,7 @@
  19.179  
  19.180  void TemplateTable::invokedynamic(int byte_no) {
  19.181    transition(vtos, vtos);
  19.182 +  assert(byte_no == f1_oop, "use this argument");
  19.183  
  19.184    if (!EnableInvokeDynamic) {
  19.185      // We should not encounter this bytecode if !EnableInvokeDynamic.
  19.186 @@ -3132,7 +3153,6 @@
  19.187  
  19.188    // G5: CallSite object (f1)
  19.189    // XX: unused (f2)
  19.190 -  // G3: receiver address
  19.191    // XX: flags (unused)
  19.192  
  19.193    Register G5_callsite = G5_method;
  19.194 @@ -3140,7 +3160,8 @@
  19.195    Register Rtemp       = G1_scratch;
  19.196    Register Rret        = Lscratch;
  19.197  
  19.198 -  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
  19.199 +  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
  19.200 +                             /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
  19.201    __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  19.202  
  19.203    __ verify_oop(G5_callsite);
    20.1 --- a/src/cpu/x86/vm/assembler_x86.hpp	Thu May 20 08:32:11 2010 -0700
    20.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon May 24 14:15:14 2010 -0700
    20.3 @@ -135,6 +135,9 @@
    20.4  
    20.5  #endif // _LP64
    20.6  
    20.7 +// JSR 292 fixed register usages:
    20.8 +REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
    20.9 +
   20.10  // Address is an abstraction used to represent a memory location
   20.11  // using any of the amd64 addressing modes with one object.
   20.12  //
    21.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Thu May 20 08:32:11 2010 -0700
    21.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Mon May 24 14:15:14 2010 -0700
    21.3 @@ -1,5 +1,5 @@
    21.4  /*
    21.5 - * Copyright 1999-2008 Sun Microsystems, Inc.  All Rights Reserved.
    21.6 + * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
    21.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.8   *
    21.9   * This code is free software; you can redistribute it and/or modify it
   21.10 @@ -309,6 +309,13 @@
   21.11  }
   21.12  
   21.13  
   21.14 +// JSR 292
   21.15 +LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
   21.16 +  assert(rbp == rbp_mh_SP_save, "must be same register");
   21.17 +  return rbp_opr;
   21.18 +}
   21.19 +
   21.20 +
   21.21  bool FrameMap::validate_frame() {
   21.22    return true;
   21.23  }
    22.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu May 20 08:32:11 2010 -0700
    22.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Mon May 24 14:15:14 2010 -0700
    22.3 @@ -126,6 +126,3 @@
    22.4      assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
    22.5      return _caller_save_xmm_regs[i];
    22.6    }
    22.7 -
    22.8 -  // JSR 292
    22.9 -  static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }
    23.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu May 20 08:32:11 2010 -0700
    23.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon May 24 14:15:14 2010 -0700
    23.3 @@ -2784,7 +2784,7 @@
    23.4    assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
    23.5           "must be aligned");
    23.6    __ call(AddressLiteral(op->addr(), rtype));
    23.7 -  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
    23.8 +  add_call_info(code_offset(), op->info());
    23.9  }
   23.10  
   23.11  
   23.12 @@ -2795,7 +2795,7 @@
   23.13           (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
   23.14           "must be aligned");
   23.15    __ call(AddressLiteral(op->addr(), rh));
   23.16 -  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
   23.17 +  add_call_info(code_offset(), op->info());
   23.18  }
   23.19  
   23.20  
   23.21 @@ -2805,16 +2805,6 @@
   23.22  }
   23.23  
   23.24  
   23.25 -void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
   23.26 -  __ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
   23.27 -}
   23.28 -
   23.29 -
   23.30 -void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
   23.31 -  __ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
   23.32 -}
   23.33 -
   23.34 -
   23.35  void LIR_Assembler::emit_static_call_stub() {
   23.36    address call_pc = __ pc();
   23.37    address stub = __ start_a_stub(call_stub_size);
    24.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu May 20 08:32:11 2010 -0700
    24.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon May 24 14:15:14 2010 -0700
    24.3 @@ -782,7 +782,7 @@
    24.4    // Restore SP from BP if the exception PC is a MethodHandle call site.
    24.5    NOT_LP64(__ get_thread(thread);)
    24.6    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    24.7 -  __ cmovptr(Assembler::notEqual, rsp, rbp);
    24.8 +  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    24.9  
   24.10    // continue at exception handler (return address removed)
   24.11    // note: do *not* remove arguments when unwinding the
    25.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu May 20 08:32:11 2010 -0700
    25.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Mon May 24 14:15:14 2010 -0700
    25.3 @@ -189,11 +189,11 @@
    25.4  }
    25.5  
    25.6  
    25.7 -void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
    25.8 +void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
    25.9    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   25.10 -  if (!giant_index) {
   25.11 +  if (index_size == sizeof(u2)) {
   25.12      load_unsigned_short(reg, Address(rsi, bcp_offset));
   25.13 -  } else {
   25.14 +  } else if (index_size == sizeof(u4)) {
   25.15      assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
   25.16      movl(reg, Address(rsi, bcp_offset));
   25.17      // Check if the secondary index definition is still ~x, otherwise
   25.18 @@ -201,14 +201,19 @@
   25.19      // plain index.
   25.20      assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
   25.21      notl(reg);  // convert to plain index
   25.22 +  } else if (index_size == sizeof(u1)) {
   25.23 +    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
   25.24 +    load_unsigned_byte(reg, Address(rsi, bcp_offset));
   25.25 +  } else {
   25.26 +    ShouldNotReachHere();
   25.27    }
   25.28  }
   25.29  
   25.30  
   25.31  void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
   25.32 -                                                           int bcp_offset, bool giant_index) {
   25.33 +                                                           int bcp_offset, size_t index_size) {
   25.34    assert(cache != index, "must use different registers");
   25.35 -  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   25.36 +  get_cache_index_at_bcp(index, bcp_offset, index_size);
   25.37    movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   25.38    assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
   25.39    shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
   25.40 @@ -216,9 +221,9 @@
   25.41  
   25.42  
   25.43  void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
   25.44 -                                                               int bcp_offset, bool giant_index) {
   25.45 +                                                               int bcp_offset, size_t index_size) {
   25.46    assert(cache != tmp, "must use different register");
   25.47 -  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   25.48 +  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
   25.49    assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
   25.50                                 // convert from field index to ConstantPoolCacheEntry index
   25.51                                 // and from word offset to byte offset
    26.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu May 20 08:32:11 2010 -0700
    26.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Mon May 24 14:15:14 2010 -0700
    26.3 @@ -76,9 +76,9 @@
    26.4    void get_cpool_and_tags(Register cpool, Register tags)   { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
    26.5    }
    26.6    void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
    26.7 -  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false);
    26.8 -  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    26.9 -  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
   26.10 +  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
   26.11 +  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   26.12 +  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
   26.13  
   26.14    // Expression stack
   26.15    void f2ieee();                                           // truncate ftos to 32bits
    27.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu May 20 08:32:11 2010 -0700
    27.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Mon May 24 14:15:14 2010 -0700
    27.3 @@ -187,11 +187,11 @@
    27.4  
    27.5  void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
    27.6                                                         int bcp_offset,
    27.7 -                                                       bool giant_index) {
    27.8 +                                                       size_t index_size) {
    27.9    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   27.10 -  if (!giant_index) {
   27.11 +  if (index_size == sizeof(u2)) {
   27.12      load_unsigned_short(index, Address(r13, bcp_offset));
   27.13 -  } else {
   27.14 +  } else if (index_size == sizeof(u4)) {
   27.15      assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
   27.16      movl(index, Address(r13, bcp_offset));
   27.17      // Check if the secondary index definition is still ~x, otherwise
   27.18 @@ -199,6 +199,11 @@
   27.19      // plain index.
   27.20      assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
   27.21      notl(index);  // convert to plain index
   27.22 +  } else if (index_size == sizeof(u1)) {
   27.23 +    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
   27.24 +    load_unsigned_byte(index, Address(r13, bcp_offset));
   27.25 +  } else {
   27.26 +    ShouldNotReachHere();
   27.27    }
   27.28  }
   27.29  
   27.30 @@ -206,9 +211,9 @@
   27.31  void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
   27.32                                                             Register index,
   27.33                                                             int bcp_offset,
   27.34 -                                                           bool giant_index) {
   27.35 +                                                           size_t index_size) {
   27.36    assert(cache != index, "must use different registers");
   27.37 -  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   27.38 +  get_cache_index_at_bcp(index, bcp_offset, index_size);
   27.39    movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   27.40    assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   27.41    // convert from field index to ConstantPoolCacheEntry index
   27.42 @@ -219,9 +224,9 @@
   27.43  void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
   27.44                                                                 Register tmp,
   27.45                                                                 int bcp_offset,
   27.46 -                                                               bool giant_index) {
   27.47 +                                                               size_t index_size) {
   27.48    assert(cache != tmp, "must use different register");
   27.49 -  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   27.50 +  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
   27.51    assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   27.52    // convert from field index to ConstantPoolCacheEntry index
   27.53    // and from word offset to byte offset
    28.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu May 20 08:32:11 2010 -0700
    28.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Mon May 24 14:15:14 2010 -0700
    28.3 @@ -95,10 +95,10 @@
    28.4  
    28.5    void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
    28.6    void get_cache_and_index_at_bcp(Register cache, Register index,
    28.7 -                                  int bcp_offset, bool giant_index = false);
    28.8 +                                  int bcp_offset, size_t index_size = sizeof(u2));
    28.9    void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
   28.10 -                                      int bcp_offset, bool giant_index = false);
   28.11 -  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
   28.12 +                                      int bcp_offset, size_t index_size = sizeof(u2));
   28.13 +  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
   28.14  
   28.15  
   28.16    void pop_ptr(Register r = rax);
    29.1 --- a/src/cpu/x86/vm/register_definitions_x86.cpp	Thu May 20 08:32:11 2010 -0700
    29.2 +++ b/src/cpu/x86/vm/register_definitions_x86.cpp	Mon May 24 14:15:14 2010 -0700
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
    29.6 + * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -115,3 +115,6 @@
   29.11  REGISTER_DEFINITION(MMXRegister, mmx5 );
   29.12  REGISTER_DEFINITION(MMXRegister, mmx6 );
   29.13  REGISTER_DEFINITION(MMXRegister, mmx7 );
   29.14 +
   29.15 +// JSR 292
   29.16 +REGISTER_DEFINITION(Register, rbp_mh_SP_save);
    30.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu May 20 08:32:11 2010 -0700
    30.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon May 24 14:15:14 2010 -0700
    30.3 @@ -214,7 +214,7 @@
    30.4      __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    30.5      __ jcc(Assembler::equal, L_giant_index);
    30.6    }
    30.7 -  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
    30.8 +  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
    30.9    __ bind(L_got_cache);
   30.10    __ movl(rbx, Address(rbx, rcx,
   30.11                      Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
   30.12 @@ -226,7 +226,7 @@
   30.13    // out of the main line of code...
   30.14    if (EnableInvokeDynamic) {
   30.15      __ bind(L_giant_index);
   30.16 -    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
   30.17 +    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
   30.18      __ jmp(L_got_cache);
   30.19    }
   30.20  
    31.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu May 20 08:32:11 2010 -0700
    31.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon May 24 14:15:14 2010 -0700
    31.3 @@ -192,7 +192,7 @@
    31.4      __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    31.5      __ jcc(Assembler::equal, L_giant_index);
    31.6    }
    31.7 -  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
    31.8 +  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
    31.9    __ bind(L_got_cache);
   31.10    __ movl(rbx, Address(rbx, rcx,
   31.11                         Address::times_ptr,
   31.12 @@ -205,7 +205,7 @@
   31.13    // out of the main line of code...
   31.14    if (EnableInvokeDynamic) {
   31.15      __ bind(L_giant_index);
   31.16 -    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
   31.17 +    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
   31.18      __ jmp(L_got_cache);
   31.19    }
   31.20  
    32.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu May 20 08:32:11 2010 -0700
    32.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon May 24 14:15:14 2010 -0700
    32.3 @@ -2012,22 +2012,29 @@
    32.4    __ membar(order_constraint);
    32.5  }
    32.6  
    32.7 -void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
    32.8 -  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
    32.9 -  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
   32.10 -
   32.11 +void TemplateTable::resolve_cache_and_index(int byte_no,
   32.12 +                                            Register result,
   32.13 +                                            Register Rcache,
   32.14 +                                            Register index,
   32.15 +                                            size_t index_size) {
   32.16    Register temp = rbx;
   32.17  
   32.18 -  assert_different_registers(Rcache, index, temp);
   32.19 -
   32.20 -  const int shift_count = (1 + byte_no)*BitsPerByte;
   32.21 +  assert_different_registers(result, Rcache, index, temp);
   32.22 +
   32.23    Label resolved;
   32.24 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   32.25 -  if (is_invokedynamic) {
   32.26 -    // we are resolved if the f1 field contains a non-null CallSite object
   32.27 -    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
   32.28 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   32.29 +  if (byte_no == f1_oop) {
   32.30 +    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
   32.31 +    // This kind of CP cache entry does not need to match the flags byte, because
   32.32 +    // there is a 1-1 relation between bytecode type and CP entry type.
   32.33 +    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
   32.34 +    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   32.35 +    __ testptr(result, result);
   32.36      __ jcc(Assembler::notEqual, resolved);
   32.37    } else {
   32.38 +    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   32.39 +    assert(result == noreg, "");  //else change code for setting result
   32.40 +    const int shift_count = (1 + byte_no)*BitsPerByte;
   32.41      __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
   32.42      __ shrl(temp, shift_count);
   32.43      // have we resolved this bytecode?
   32.44 @@ -2053,7 +2060,9 @@
   32.45    __ movl(temp, (int)bytecode());
   32.46    __ call_VM(noreg, entry, temp);
   32.47    // Update registers with resolved info
   32.48 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   32.49 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   32.50 +  if (result != noreg)
   32.51 +    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   32.52    __ bind(resolved);
   32.53  }
   32.54  
   32.55 @@ -2087,7 +2096,8 @@
   32.56                                                 Register itable_index,
   32.57                                                 Register flags,
   32.58                                                 bool is_invokevirtual,
   32.59 -                                               bool is_invokevfinal /*unused*/) {
   32.60 +                                               bool is_invokevfinal /*unused*/,
   32.61 +                                               bool is_invokedynamic) {
   32.62    // setup registers
   32.63    const Register cache = rcx;
   32.64    const Register index = rdx;
   32.65 @@ -2109,13 +2119,18 @@
   32.66    const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
   32.67                                      ConstantPoolCacheEntry::f2_offset());
   32.68  
   32.69 -  resolve_cache_and_index(byte_no, cache, index);
   32.70 -
   32.71 -  __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
   32.72 +  if (byte_no == f1_oop) {
   32.73 +    // Resolved f1_oop goes directly into 'method' register.
   32.74 +    assert(is_invokedynamic, "");
   32.75 +    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
   32.76 +  } else {
   32.77 +    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   32.78 +    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
   32.79 +  }
   32.80    if (itable_index != noreg) {
   32.81      __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   32.82    }
   32.83 -  __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
   32.84 +  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
   32.85  }
   32.86  
   32.87  
   32.88 @@ -2169,7 +2184,7 @@
   32.89    const Register off   = rbx;
   32.90    const Register flags = rax;
   32.91  
   32.92 -  resolve_cache_and_index(byte_no, cache, index);
   32.93 +  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   32.94    jvmti_post_field_access(cache, index, is_static, false);
   32.95    load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
   32.96  
   32.97 @@ -2378,7 +2393,7 @@
   32.98    const Register off   = rbx;
   32.99    const Register flags = rax;
  32.100  
  32.101 -  resolve_cache_and_index(byte_no, cache, index);
  32.102 +  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
  32.103    jvmti_post_field_mod(cache, index, is_static);
  32.104    load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
  32.105  
  32.106 @@ -2815,10 +2830,11 @@
  32.107    // save 'interpreter return address'
  32.108    __ save_bcp();
  32.109  
  32.110 -  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
  32.111 +  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
  32.112  
  32.113    // load receiver if needed (note: no return address pushed yet)
  32.114    if (load_receiver) {
  32.115 +    assert(!is_invokedynamic, "");
  32.116      __ movl(recv, flags);
  32.117      __ andl(recv, 0xFF);
  32.118      // recv count is 0 based?
  32.119 @@ -2910,6 +2926,7 @@
  32.120  
  32.121  void TemplateTable::invokevirtual(int byte_no) {
  32.122    transition(vtos, vtos);
  32.123 +  assert(byte_no == f2_byte, "use this argument");
  32.124    prepare_invoke(rbx, noreg, byte_no);
  32.125  
  32.126    // rbx,: index
  32.127 @@ -2922,6 +2939,7 @@
  32.128  
  32.129  void TemplateTable::invokespecial(int byte_no) {
  32.130    transition(vtos, vtos);
  32.131 +  assert(byte_no == f1_byte, "use this argument");
  32.132    prepare_invoke(rbx, noreg, byte_no);
  32.133    // do the call
  32.134    __ verify_oop(rbx);
  32.135 @@ -2932,6 +2950,7 @@
  32.136  
  32.137  void TemplateTable::invokestatic(int byte_no) {
  32.138    transition(vtos, vtos);
  32.139 +  assert(byte_no == f1_byte, "use this argument");
  32.140    prepare_invoke(rbx, noreg, byte_no);
  32.141    // do the call
  32.142    __ verify_oop(rbx);
  32.143 @@ -2942,12 +2961,14 @@
  32.144  
  32.145  void TemplateTable::fast_invokevfinal(int byte_no) {
  32.146    transition(vtos, vtos);
  32.147 +  assert(byte_no == f2_byte, "use this argument");
  32.148    __ stop("fast_invokevfinal not used on x86");
  32.149  }
  32.150  
  32.151  
  32.152  void TemplateTable::invokeinterface(int byte_no) {
  32.153    transition(vtos, vtos);
  32.154 +  assert(byte_no == f1_byte, "use this argument");
  32.155    prepare_invoke(rax, rbx, byte_no);
  32.156  
  32.157    // rax,: Interface
  32.158 @@ -3036,11 +3057,11 @@
  32.159      return;
  32.160    }
  32.161  
  32.162 +  assert(byte_no == f1_oop, "use this argument");
  32.163    prepare_invoke(rax, rbx, byte_no);
  32.164  
  32.165    // rax: CallSite object (f1)
  32.166    // rbx: unused (f2)
  32.167 -  // rcx: receiver address
  32.168    // rdx: flags (unused)
  32.169  
  32.170    if (ProfileInterpreter) {
    33.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu May 20 08:32:11 2010 -0700
    33.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon May 24 14:15:14 2010 -0700
    33.3 @@ -2015,21 +2015,28 @@
    33.4    }
    33.5  }
    33.6  
    33.7 -void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
    33.8 -  assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
    33.9 -  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
   33.10 -
   33.11 +void TemplateTable::resolve_cache_and_index(int byte_no,
   33.12 +                                            Register result,
   33.13 +                                            Register Rcache,
   33.14 +                                            Register index,
   33.15 +                                            size_t index_size) {
   33.16    const Register temp = rbx;
   33.17 -  assert_different_registers(Rcache, index, temp);
   33.18 -
   33.19 -  const int shift_count = (1 + byte_no) * BitsPerByte;
   33.20 +  assert_different_registers(result, Rcache, index, temp);
   33.21 +
   33.22    Label resolved;
   33.23 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   33.24 -  if (is_invokedynamic) {
   33.25 -    // we are resolved if the f1 field contains a non-null CallSite object
   33.26 -    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
   33.27 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   33.28 +  if (byte_no == f1_oop) {
   33.29 +    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
   33.30 +    // This kind of CP cache entry does not need to match the flags byte, because
   33.31 +    // there is a 1-1 relation between bytecode type and CP entry type.
   33.32 +    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
   33.33 +    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   33.34 +    __ testptr(result, result);
   33.35      __ jcc(Assembler::notEqual, resolved);
   33.36    } else {
   33.37 +    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   33.38 +    assert(result == noreg, "");  //else change code for setting result
   33.39 +    const int shift_count = (1 + byte_no) * BitsPerByte;
   33.40      __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
   33.41      __ shrl(temp, shift_count);
   33.42      // have we resolved this bytecode?
   33.43 @@ -2064,7 +2071,9 @@
   33.44    __ call_VM(noreg, entry, temp);
   33.45  
   33.46    // Update registers with resolved info
   33.47 -  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   33.48 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   33.49 +  if (result != noreg)
   33.50 +    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
   33.51    __ bind(resolved);
   33.52  }
   33.53  
   33.54 @@ -2100,7 +2109,8 @@
   33.55                                                 Register itable_index,
   33.56                                                 Register flags,
   33.57                                                 bool is_invokevirtual,
   33.58 -                                               bool is_invokevfinal /*unused*/) {
   33.59 +                                               bool is_invokevfinal, /*unused*/
   33.60 +                                               bool is_invokedynamic) {
   33.61    // setup registers
   33.62    const Register cache = rcx;
   33.63    const Register index = rdx;
   33.64 @@ -2120,15 +2130,18 @@
   33.65    const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
   33.66                                      ConstantPoolCacheEntry::f2_offset());
   33.67  
   33.68 -  resolve_cache_and_index(byte_no, cache, index);
   33.69 -
   33.70 -  assert(wordSize == 8, "adjust code below");
   33.71 -  __ movptr(method, Address(cache, index, Address::times_8, method_offset));
   33.72 +  if (byte_no == f1_oop) {
   33.73 +    // Resolved f1_oop goes directly into 'method' register.
   33.74 +    assert(is_invokedynamic, "");
   33.75 +    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
   33.76 +  } else {
   33.77 +    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   33.78 +    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
   33.79 +  }
   33.80    if (itable_index != noreg) {
   33.81 -    __ movptr(itable_index,
   33.82 -            Address(cache, index, Address::times_8, index_offset));
   33.83 +    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
   33.84    }
   33.85 -  __ movl(flags , Address(cache, index, Address::times_8, flags_offset));
   33.86 +  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
   33.87  }
   33.88  
   33.89  
   33.90 @@ -2187,7 +2200,7 @@
   33.91    const Register flags = rax;
   33.92    const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
   33.93  
   33.94 -  resolve_cache_and_index(byte_no, cache, index);
   33.95 +  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
   33.96    jvmti_post_field_access(cache, index, is_static, false);
   33.97    load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
   33.98  
   33.99 @@ -2390,7 +2403,7 @@
  33.100    const Register flags = rax;
  33.101    const Register bc    = c_rarg3;
  33.102  
  33.103 -  resolve_cache_and_index(byte_no, cache, index);
  33.104 +  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
  33.105    jvmti_post_field_mod(cache, index, is_static);
  33.106    load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
  33.107  
  33.108 @@ -2815,10 +2828,11 @@
  33.109    // save 'interpreter return address'
  33.110    __ save_bcp();
  33.111  
  33.112 -  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
  33.113 +  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
  33.114  
  33.115    // load receiver if needed (note: no return address pushed yet)
  33.116    if (load_receiver) {
  33.117 +    assert(!is_invokedynamic, "");
  33.118      __ movl(recv, flags);
  33.119      __ andl(recv, 0xFF);
  33.120      Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
  33.121 @@ -2914,6 +2928,7 @@
  33.122  
  33.123  void TemplateTable::invokevirtual(int byte_no) {
  33.124    transition(vtos, vtos);
  33.125 +  assert(byte_no == f2_byte, "use this argument");
  33.126    prepare_invoke(rbx, noreg, byte_no);
  33.127  
  33.128    // rbx: index
  33.129 @@ -2926,6 +2941,7 @@
  33.130  
  33.131  void TemplateTable::invokespecial(int byte_no) {
  33.132    transition(vtos, vtos);
  33.133 +  assert(byte_no == f1_byte, "use this argument");
  33.134    prepare_invoke(rbx, noreg, byte_no);
  33.135    // do the call
  33.136    __ verify_oop(rbx);
  33.137 @@ -2936,6 +2952,7 @@
  33.138  
  33.139  void TemplateTable::invokestatic(int byte_no) {
  33.140    transition(vtos, vtos);
  33.141 +  assert(byte_no == f1_byte, "use this argument");
  33.142    prepare_invoke(rbx, noreg, byte_no);
  33.143    // do the call
  33.144    __ verify_oop(rbx);
  33.145 @@ -2945,11 +2962,13 @@
  33.146  
  33.147  void TemplateTable::fast_invokevfinal(int byte_no) {
  33.148    transition(vtos, vtos);
  33.149 +  assert(byte_no == f2_byte, "use this argument");
  33.150    __ stop("fast_invokevfinal not used on amd64");
  33.151  }
  33.152  
  33.153  void TemplateTable::invokeinterface(int byte_no) {
  33.154    transition(vtos, vtos);
  33.155 +  assert(byte_no == f1_byte, "use this argument");
  33.156    prepare_invoke(rax, rbx, byte_no);
  33.157  
  33.158    // rax: Interface
  33.159 @@ -3027,6 +3046,7 @@
  33.160  
  33.161  void TemplateTable::invokedynamic(int byte_no) {
  33.162    transition(vtos, vtos);
  33.163 +  assert(byte_no == f1_oop, "use this argument");
  33.164  
  33.165    if (!EnableInvokeDynamic) {
  33.166      // We should not encounter this bytecode if !EnableInvokeDynamic.
  33.167 @@ -3039,6 +3059,7 @@
  33.168      return;
  33.169    }
  33.170  
  33.171 +  assert(byte_no == f1_oop, "use this argument");
  33.172    prepare_invoke(rax, rbx, byte_no);
  33.173  
  33.174    // rax: CallSite object (f1)
    34.1 --- a/src/os/solaris/dtrace/generateJvmOffsets.cpp	Thu May 20 08:32:11 2010 -0700
    34.2 +++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp	Mon May 24 14:15:14 2010 -0700
    34.3 @@ -1,5 +1,5 @@
    34.4  /*
    34.5 - * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
    34.6 + * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
    34.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8   *
    34.9   * This code is free software; you can redistribute it and/or modify it
   34.10 @@ -232,12 +232,11 @@
   34.11    GEN_OFFS(CodeBlob, _header_size);
   34.12    GEN_OFFS(CodeBlob, _instructions_offset);
   34.13    GEN_OFFS(CodeBlob, _data_offset);
   34.14 -  GEN_OFFS(CodeBlob, _oops_offset);
   34.15 -  GEN_OFFS(CodeBlob, _oops_length);
   34.16    GEN_OFFS(CodeBlob, _frame_size);
   34.17    printf("\n");
   34.18  
   34.19    GEN_OFFS(nmethod, _method);
   34.20 +  GEN_OFFS(nmethod, _oops_offset);
   34.21    GEN_OFFS(nmethod, _scopes_data_offset);
   34.22    GEN_OFFS(nmethod, _scopes_pcs_offset);
   34.23    GEN_OFFS(nmethod, _handler_table_offset);
    35.1 --- a/src/os/solaris/dtrace/libjvm_db.c	Thu May 20 08:32:11 2010 -0700
    35.2 +++ b/src/os/solaris/dtrace/libjvm_db.c	Mon May 24 14:15:14 2010 -0700
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
    35.6 + * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -130,7 +130,7 @@
   35.11    int32_t  scopes_data_beg;     /* _scopes_data_offset */
   35.12    int32_t  scopes_data_end;
   35.13    int32_t  oops_beg;            /* _oops_offset */
   35.14 -  int32_t  oops_len;            /* _oops_length */
   35.15 +  int32_t  oops_end;
   35.16    int32_t  scopes_pcs_beg;      /* _scopes_pcs_offset */
   35.17    int32_t  scopes_pcs_end;
   35.18  
   35.19 @@ -597,9 +597,9 @@
   35.20    CHECK_FAIL(err);
   35.21  
   35.22    /* Oops */
   35.23 -  err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_offset, &N->oops_beg, SZ32);
   35.24 +  err = ps_pread(J->P, nm + OFFSET_nmethod_oops_offset, &N->oops_beg, SZ32);
   35.25    CHECK_FAIL(err);
   35.26 -  err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_length, &N->oops_len, SZ32);
   35.27 +  err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->oops_end, SZ32);
   35.28    CHECK_FAIL(err);
   35.29  
   35.30    /* scopes_pcs */
   35.31 @@ -624,8 +624,8 @@
   35.32        fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n",
   35.33                         N->orig_pc_offset);
   35.34  
   35.35 -      fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_len: %#x\n",
   35.36 -                       N->oops_beg, N->oops_len);
   35.37 +      fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_end: %#x\n",
   35.38 +                       N->oops_beg, N->oops_end);
   35.39  
   35.40        fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n",
   35.41                         N->scopes_data_beg, N->scopes_data_end);
   35.42 @@ -959,8 +959,8 @@
   35.43      err = scope_desc_at(N, decode_offset, vf);
   35.44      CHECK_FAIL(err);
   35.45  
   35.46 -    if (vf->methodIdx > N->oops_len) {
   35.47 -      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
   35.48 +    if (vf->methodIdx > ((N->oops_end - N->oops_beg) / POINTER_SIZE)) {
   35.49 +      fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops length) !\n");
   35.50        return -1;
   35.51      }
   35.52      err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
    36.1 --- a/src/share/vm/asm/codeBuffer.hpp	Thu May 20 08:32:11 2010 -0700
    36.2 +++ b/src/share/vm/asm/codeBuffer.hpp	Mon May 24 14:15:14 2010 -0700
    36.3 @@ -510,9 +510,9 @@
    36.4      copy_relocations_to(blob);
    36.5      copy_code_to(blob);
    36.6    }
    36.7 -  void copy_oops_to(CodeBlob* blob) {
    36.8 +  void copy_oops_to(nmethod* nm) {
    36.9      if (!oop_recorder()->is_unused()) {
   36.10 -      oop_recorder()->copy_to(blob);
   36.11 +      oop_recorder()->copy_to(nm);
   36.12      }
   36.13    }
   36.14  
    37.1 --- a/src/share/vm/c1/c1_FrameMap.hpp	Thu May 20 08:32:11 2010 -0700
    37.2 +++ b/src/share/vm/c1/c1_FrameMap.hpp	Mon May 24 14:15:14 2010 -0700
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
    37.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -150,6 +150,9 @@
   37.11    // Opr representing the stack_pointer on this platform
   37.12    static LIR_Opr stack_pointer();
   37.13  
   37.14 +  // JSR 292
   37.15 +  static LIR_Opr method_handle_invoke_SP_save_opr();
   37.16 +
   37.17    static BasicTypeArray*     signature_type_array_for(const ciMethod* method);
   37.18    static BasicTypeArray*     signature_type_array_for(const char * signature);
   37.19  
    38.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu May 20 08:32:11 2010 -0700
    38.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Mon May 24 14:15:14 2010 -0700
    38.3 @@ -2438,13 +2438,13 @@
    38.4        case Bytecodes::_invokestatic   : // fall through
    38.5        case Bytecodes::_invokedynamic  : // fall through
    38.6        case Bytecodes::_invokeinterface: invoke(code); break;
    38.7 -      case Bytecodes::_new            : new_instance(s.get_index_big()); break;
    38.8 +      case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
    38.9        case Bytecodes::_newarray       : new_type_array(); break;
   38.10        case Bytecodes::_anewarray      : new_object_array(); break;
   38.11        case Bytecodes::_arraylength    : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
   38.12        case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
   38.13 -      case Bytecodes::_checkcast      : check_cast(s.get_index_big()); break;
   38.14 -      case Bytecodes::_instanceof     : instance_of(s.get_index_big()); break;
   38.15 +      case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
   38.16 +      case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
   38.17        // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
   38.18        case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
   38.19        case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
    39.1 --- a/src/share/vm/c1/c1_IR.cpp	Thu May 20 08:32:11 2010 -0700
    39.2 +++ b/src/share/vm/c1/c1_IR.cpp	Mon May 24 14:15:14 2010 -0700
    39.3 @@ -230,7 +230,8 @@
    39.4    , _stack(stack)
    39.5    , _exception_handlers(exception_handlers)
    39.6    , _next(NULL)
    39.7 -  , _id(-1) {
    39.8 +  , _id(-1)
    39.9 +  , _is_method_handle_invoke(false) {
   39.10    assert(_stack != NULL, "must be non null");
   39.11    assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
   39.12  }
   39.13 @@ -241,7 +242,8 @@
   39.14    , _exception_handlers(NULL)
   39.15    , _bci(info->_bci)
   39.16    , _scope_debug_info(NULL)
   39.17 -  , _oop_map(NULL) {
   39.18 +  , _oop_map(NULL)
   39.19 +  , _is_method_handle_invoke(info->_is_method_handle_invoke) {
   39.20    if (lock_stack_only) {
   39.21      if (info->_stack != NULL) {
   39.22        _stack = info->_stack->copy_locks();
   39.23 @@ -259,10 +261,10 @@
   39.24  }
   39.25  
   39.26  
   39.27 -void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
   39.28 +void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   39.29    // record the safepoint before recording the debug info for enclosing scopes
   39.30    recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
   39.31 -  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
   39.32 +  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
   39.33    recorder->end_safepoint(pc_offset);
   39.34  }
   39.35  
    40.1 --- a/src/share/vm/c1/c1_IR.hpp	Thu May 20 08:32:11 2010 -0700
    40.2 +++ b/src/share/vm/c1/c1_IR.hpp	Mon May 24 14:15:14 2010 -0700
    40.3 @@ -269,6 +269,7 @@
    40.4    int               _bci;
    40.5    CodeEmitInfo*     _next;
    40.6    int               _id;
    40.7 +  bool              _is_method_handle_invoke;    // true if the associated call site is a MethodHandle call site.
    40.8  
    40.9    FrameMap*     frame_map() const                { return scope()->compilation()->frame_map(); }
   40.10    Compilation*  compilation() const              { return scope()->compilation(); }
   40.11 @@ -287,7 +288,8 @@
   40.12      , _stack(NULL)
   40.13      , _exception_handlers(NULL)
   40.14      , _next(NULL)
   40.15 -    , _id(-1) {
   40.16 +    , _id(-1)
   40.17 +    , _is_method_handle_invoke(false) {
   40.18    }
   40.19  
   40.20    // make a copy
   40.21 @@ -302,13 +304,16 @@
   40.22    int bci() const                                { return _bci; }
   40.23  
   40.24    void add_register_oop(LIR_Opr opr);
   40.25 -  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
   40.26 +  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
   40.27  
   40.28    CodeEmitInfo* next() const        { return _next; }
   40.29    void set_next(CodeEmitInfo* next) { _next = next; }
   40.30  
   40.31    int id() const      { return _id; }
   40.32    void set_id(int id) { _id = id; }
   40.33 +
   40.34 +  bool     is_method_handle_invoke() const { return _is_method_handle_invoke;     }
   40.35 +  void set_is_method_handle_invoke(bool x) {        _is_method_handle_invoke = x; }
   40.36  };
   40.37  
   40.38  
    41.1 --- a/src/share/vm/c1/c1_LIR.cpp	Thu May 20 08:32:11 2010 -0700
    41.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Mon May 24 14:15:14 2010 -0700
    41.3 @@ -715,7 +715,10 @@
    41.4        }
    41.5  
    41.6        if (opJavaCall->_info)                     do_info(opJavaCall->_info);
    41.7 -      if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
    41.8 +      if (opJavaCall->is_method_handle_invoke()) {
    41.9 +        opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
   41.10 +        do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
   41.11 +      }
   41.12        do_call();
   41.13        if (opJavaCall->_result->is_valid())       do_output(opJavaCall->_result);
   41.14  
    42.1 --- a/src/share/vm/c1/c1_LIR.hpp	Thu May 20 08:32:11 2010 -0700
    42.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Mon May 24 14:15:14 2010 -0700
    42.3 @@ -1033,8 +1033,9 @@
    42.4   friend class LIR_OpVisitState;
    42.5  
    42.6   private:
    42.7 -  ciMethod*       _method;
    42.8 -  LIR_Opr         _receiver;
    42.9 +  ciMethod* _method;
   42.10 +  LIR_Opr   _receiver;
   42.11 +  LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
   42.12  
   42.13   public:
   42.14    LIR_OpJavaCall(LIR_Code code, ciMethod* method,
   42.15 @@ -1043,14 +1044,18 @@
   42.16                   CodeEmitInfo* info)
   42.17    : LIR_OpCall(code, addr, result, arguments, info)
   42.18    , _receiver(receiver)
   42.19 -  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   42.20 +  , _method(method)
   42.21 +  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
   42.22 +  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   42.23  
   42.24    LIR_OpJavaCall(LIR_Code code, ciMethod* method,
   42.25                   LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
   42.26                   LIR_OprList* arguments, CodeEmitInfo* info)
   42.27    : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
   42.28    , _receiver(receiver)
   42.29 -  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   42.30 +  , _method(method)
   42.31 +  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
   42.32 +  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   42.33  
   42.34    LIR_Opr receiver() const                       { return _receiver; }
   42.35    ciMethod* method() const                       { return _method;   }
    43.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp	Thu May 20 08:32:11 2010 -0700
    43.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Mon May 24 14:15:14 2010 -0700
    43.3 @@ -301,9 +301,9 @@
    43.4  }
    43.5  
    43.6  
    43.7 -void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
    43.8 +void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
    43.9    flush_debug_info(pc_offset);
   43.10 -  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
   43.11 +  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
   43.12    if (cinfo->exception_handlers() != NULL) {
   43.13      compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   43.14    }
   43.15 @@ -413,12 +413,6 @@
   43.16  void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   43.17    verify_oop_map(op->info());
   43.18  
   43.19 -  // JSR 292
   43.20 -  // Preserve the SP over MethodHandle call sites.
   43.21 -  if (op->is_method_handle_invoke()) {
   43.22 -    preserve_SP(op);
   43.23 -  }
   43.24 -
   43.25    if (os::is_MP()) {
   43.26      // must align calls sites, otherwise they can't be updated atomically on MP hardware
   43.27      align_call(op->code());
   43.28 @@ -444,10 +438,6 @@
   43.29    default: ShouldNotReachHere();
   43.30    }
   43.31  
   43.32 -  if (op->is_method_handle_invoke()) {
   43.33 -    restore_SP(op);
   43.34 -  }
   43.35 -
   43.36  #if defined(X86) && defined(TIERED)
   43.37    // C2 leave fpu stack dirty clean it
   43.38    if (UseSSE < 2) {
    44.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Thu May 20 08:32:11 2010 -0700
    44.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Mon May 24 14:15:14 2010 -0700
    44.3 @@ -84,7 +84,7 @@
    44.4    Address as_Address_hi(LIR_Address* addr);
    44.5  
    44.6    // debug information
    44.7 -  void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
    44.8 +  void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
    44.9    void add_debug_info_for_branch(CodeEmitInfo* info);
   44.10    void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
   44.11    void add_debug_info_for_div0_here(CodeEmitInfo* info);
   44.12 @@ -212,10 +212,6 @@
   44.13    void ic_call(     LIR_OpJavaCall* op);
   44.14    void vtable_call( LIR_OpJavaCall* op);
   44.15  
   44.16 -  // JSR 292
   44.17 -  void preserve_SP(LIR_OpJavaCall* op);
   44.18 -  void restore_SP( LIR_OpJavaCall* op);
   44.19 -
   44.20    void osr_entry();
   44.21  
   44.22    void build_frame();
    45.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu May 20 08:32:11 2010 -0700
    45.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Mon May 24 14:15:14 2010 -0700
    45.3 @@ -2371,9 +2371,17 @@
    45.4    bool optimized = x->target_is_loaded() && x->target_is_final();
    45.5    assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
    45.6  
    45.7 +  // JSR 292
    45.8 +  // Preserve the SP over MethodHandle call sites.
    45.9 +  ciMethod* target = x->target();
   45.10 +  if (target->is_method_handle_invoke()) {
   45.11 +    info->set_is_method_handle_invoke(true);
   45.12 +    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
   45.13 +  }
   45.14 +
   45.15    switch (x->code()) {
   45.16      case Bytecodes::_invokestatic:
   45.17 -      __ call_static(x->target(), result_register,
   45.18 +      __ call_static(target, result_register,
   45.19                       SharedRuntime::get_resolve_static_call_stub(),
   45.20                       arg_list, info);
   45.21        break;
   45.22 @@ -2383,17 +2391,17 @@
   45.23        // for final target we still produce an inline cache, in order
   45.24        // to be able to call mixed mode
   45.25        if (x->code() == Bytecodes::_invokespecial || optimized) {
   45.26 -        __ call_opt_virtual(x->target(), receiver, result_register,
   45.27 +        __ call_opt_virtual(target, receiver, result_register,
   45.28                              SharedRuntime::get_resolve_opt_virtual_call_stub(),
   45.29                              arg_list, info);
   45.30        } else if (x->vtable_index() < 0) {
   45.31 -        __ call_icvirtual(x->target(), receiver, result_register,
   45.32 +        __ call_icvirtual(target, receiver, result_register,
   45.33                            SharedRuntime::get_resolve_virtual_call_stub(),
   45.34                            arg_list, info);
   45.35        } else {
   45.36          int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
   45.37          int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
   45.38 -        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
   45.39 +        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
   45.40        }
   45.41        break;
   45.42      case Bytecodes::_invokedynamic: {
   45.43 @@ -2432,7 +2440,7 @@
   45.44        // Load target MethodHandle from CallSite object.
   45.45        __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
   45.46  
   45.47 -      __ call_dynamic(x->target(), receiver, result_register,
   45.48 +      __ call_dynamic(target, receiver, result_register,
   45.49                        SharedRuntime::get_resolve_opt_virtual_call_stub(),
   45.50                        arg_list, info);
   45.51        break;
   45.52 @@ -2442,6 +2450,12 @@
   45.53        break;
   45.54    }
   45.55  
   45.56 +  // JSR 292
   45.57 +  // Restore the SP after MethodHandle call sites.
   45.58 +  if (target->is_method_handle_invoke()) {
   45.59 +    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
   45.60 +  }
   45.61 +
   45.62    if (x->type()->is_float() || x->type()->is_double()) {
   45.63      // Force rounding of results from non-strictfp when in strictfp
   45.64      // scope (or when we don't know the strictness of the callee, to
    46.1 --- a/src/share/vm/ci/ciMethod.cpp	Thu May 20 08:32:11 2010 -0700
    46.2 +++ b/src/share/vm/ci/ciMethod.cpp	Mon May 24 14:15:14 2010 -0700
    46.3 @@ -690,20 +690,32 @@
    46.4  
    46.5  // ------------------------------------------------------------------
    46.6  // invokedynamic support
    46.7 +
    46.8 +// ------------------------------------------------------------------
    46.9 +// ciMethod::is_method_handle_invoke
   46.10  //
   46.11 +// Return true if the method is a MethodHandle target.
   46.12  bool ciMethod::is_method_handle_invoke() const {
   46.13 -  check_is_loaded();
   46.14 -  bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
   46.15 +  bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
   46.16 +               methodOopDesc::is_method_handle_invoke_name(name()->sid()));
   46.17  #ifdef ASSERT
   46.18 -  {
   46.19 -    VM_ENTRY_MARK;
   46.20 -    bool flag2 = get_methodOop()->is_method_handle_invoke();
   46.21 -    assert(flag == flag2, "consistent");
   46.22 +  if (is_loaded()) {
   46.23 +    bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
   46.24 +    {
   46.25 +      VM_ENTRY_MARK;
   46.26 +      bool flag3 = get_methodOop()->is_method_handle_invoke();
   46.27 +      assert(flag2 == flag3, "consistent");
   46.28 +      assert(flag  == flag3, "consistent");
   46.29 +    }
   46.30    }
   46.31  #endif //ASSERT
   46.32    return flag;
   46.33  }
   46.34  
   46.35 +// ------------------------------------------------------------------
   46.36 +// ciMethod::is_method_handle_adapter
   46.37 +//
   46.38 +// Return true if the method is a generated MethodHandle adapter.
   46.39  bool ciMethod::is_method_handle_adapter() const {
   46.40    check_is_loaded();
   46.41    VM_ENTRY_MARK;
    47.1 --- a/src/share/vm/ci/ciStreams.cpp	Thu May 20 08:32:11 2010 -0700
    47.2 +++ b/src/share/vm/ci/ciStreams.cpp	Mon May 24 14:15:14 2010 -0700
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
    47.6 + * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -81,27 +81,21 @@
   47.11  // providing accessors for constant pool items.
   47.12  
   47.13  // ------------------------------------------------------------------
   47.14 -// ciBytecodeStream::wide
   47.15 -//
   47.16 -// Special handling for the wide bytcode
   47.17 -Bytecodes::Code ciBytecodeStream::wide()
   47.18 -{
   47.19 -  // Get following bytecode; do not return wide
   47.20 -  Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
   47.21 -  _pc += 2;                     // Skip both bytecodes
   47.22 -  _pc += 2;                     // Skip index always
   47.23 -  if( bc == Bytecodes::_iinc )
   47.24 -    _pc += 2;                   // Skip optional constant
   47.25 -  _was_wide = _pc;              // Flag last wide bytecode found
   47.26 -  return bc;
   47.27 -}
   47.28 -
   47.29 -// ------------------------------------------------------------------
   47.30 -// ciBytecodeStream::table
   47.31 +// ciBytecodeStream::next_wide_or_table
   47.32  //
   47.33  // Special handling for switch ops
   47.34 -Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
   47.35 -  switch( bc ) {                // Check for special bytecode handling
   47.36 +Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
   47.37 +  switch (bc) {                // Check for special bytecode handling
   47.38 +  case Bytecodes::_wide:
   47.39 +    // Special handling for the wide bytcode
   47.40 +    // Get following bytecode; do not return wide
   47.41 +    assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
   47.42 +    bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
   47.43 +    assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
   47.44 +    _pc += Bytecodes::wide_length_for(bc);
   47.45 +    _was_wide = _pc;              // Flag last wide bytecode found
   47.46 +    assert(is_wide(), "accessor works right");
   47.47 +    break;
   47.48  
   47.49    case Bytecodes::_lookupswitch:
   47.50      _pc++;                      // Skip wide bytecode
   47.51 @@ -164,7 +158,7 @@
   47.52  int ciBytecodeStream::get_klass_index() const {
   47.53    switch(cur_bc()) {
   47.54    case Bytecodes::_ldc:
   47.55 -    return get_index();
   47.56 +    return get_index_u1();
   47.57    case Bytecodes::_ldc_w:
   47.58    case Bytecodes::_ldc2_w:
   47.59    case Bytecodes::_checkcast:
   47.60 @@ -173,7 +167,7 @@
   47.61    case Bytecodes::_multianewarray:
   47.62    case Bytecodes::_new:
   47.63    case Bytecodes::_newarray:
   47.64 -    return get_index_big();
   47.65 +    return get_index_u2();
   47.66    default:
   47.67      ShouldNotReachHere();
   47.68      return 0;
   47.69 @@ -199,10 +193,10 @@
   47.70  int ciBytecodeStream::get_constant_index() const {
   47.71    switch(cur_bc()) {
   47.72    case Bytecodes::_ldc:
   47.73 -    return get_index();
   47.74 +    return get_index_u1();
   47.75    case Bytecodes::_ldc_w:
   47.76    case Bytecodes::_ldc2_w:
   47.77 -    return get_index_big();
   47.78 +    return get_index_u2();
   47.79    default:
   47.80      ShouldNotReachHere();
   47.81      return 0;
   47.82 @@ -239,7 +233,7 @@
   47.83           cur_bc() == Bytecodes::_putfield ||
   47.84           cur_bc() == Bytecodes::_getstatic ||
   47.85           cur_bc() == Bytecodes::_putstatic, "wrong bc");
   47.86 -  return get_index_big();
   47.87 +  return get_index_u2_cpcache();
   47.88  }
   47.89  
   47.90  
   47.91 @@ -319,7 +313,9 @@
   47.92      ShouldNotReachHere();
   47.93    }
   47.94  #endif
   47.95 -  return get_index_int();
   47.96 +  if (has_index_u4())
   47.97 +    return get_index_u4();  // invokedynamic
   47.98 +  return get_index_u2_cpcache();
   47.99  }
  47.100  
  47.101  // ------------------------------------------------------------------
    48.1 --- a/src/share/vm/ci/ciStreams.hpp	Thu May 20 08:32:11 2010 -0700
    48.2 +++ b/src/share/vm/ci/ciStreams.hpp	Mon May 24 14:15:14 2010 -0700
    48.3 @@ -1,5 +1,5 @@
    48.4  /*
    48.5 - * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
    48.6 + * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
    48.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    48.8   *
    48.9   * This code is free software; you can redistribute it and/or modify it
   48.10 @@ -31,15 +31,19 @@
   48.11  // their original form during iteration.
   48.12  class ciBytecodeStream : StackObj {
   48.13  private:
   48.14 - // Handling for the weird bytecodes
   48.15 -  Bytecodes::Code wide();       // Handle wide bytecode
   48.16 -  Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
   48.17 +  // Handling for the weird bytecodes
   48.18 +  Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
   48.19  
   48.20    static Bytecodes::Code check_java(Bytecodes::Code c) {
   48.21      assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
   48.22      return c;
   48.23    }
   48.24  
   48.25 +  static Bytecodes::Code check_defined(Bytecodes::Code c) {
   48.26 +    assert(Bytecodes::is_defined(c), "");
   48.27 +    return c;
   48.28 +  }
   48.29 +
   48.30    ciMethod* _method;           // the method
   48.31    ciInstanceKlass* _holder;
   48.32    address _bc_start;            // Start of current bytecode for table
   48.33 @@ -50,11 +54,21 @@
   48.34    address _end;                    // Past end of bytecodes
   48.35    address _pc;                     // Current PC
   48.36    Bytecodes::Code _bc;             // Current bytecode
   48.37 +  Bytecodes::Code _raw_bc;         // Current bytecode, raw form
   48.38  
   48.39    void reset( address base, unsigned int size ) {
   48.40      _bc_start =_was_wide = 0;
   48.41      _start = _pc = base; _end = base + size; }
   48.42  
   48.43 +  void assert_wide(bool require_wide) const {
   48.44 +    if (require_wide)
   48.45 +         { assert(is_wide(),  "must be a wide instruction"); }
   48.46 +    else { assert(!is_wide(), "must not be a wide instruction"); }
   48.47 +  }
   48.48 +
   48.49 +  Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
   48.50 +  Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
   48.51 +
   48.52  public:
   48.53    // End-Of-Bytecodes
   48.54    static Bytecodes::Code EOBC() {
   48.55 @@ -92,11 +106,12 @@
   48.56    }
   48.57  
   48.58    address cur_bcp() const       { return _bc_start; }  // Returns bcp to current instruction
   48.59 -  int next_bci() const          { return _pc -_start; }
   48.60 +  int next_bci() const          { return _pc - _start; }
   48.61    int cur_bci() const           { return _bc_start - _start; }
   48.62    int instruction_size() const  { return _pc - _bc_start; }
   48.63  
   48.64    Bytecodes::Code cur_bc() const{ return check_java(_bc); }
   48.65 +  Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
   48.66    Bytecodes::Code next_bc()     { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
   48.67  
   48.68    // Return current ByteCode and increment PC to next bytecode, skipping all
   48.69 @@ -109,85 +124,76 @@
   48.70  
   48.71      // Fetch Java bytecode
   48.72      // All rewritten bytecodes maintain the size of original bytecode.
   48.73 -    _bc = Bytecodes::java_code((Bytecodes::Code)*_pc);
   48.74 +    _bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
   48.75      int csize = Bytecodes::length_for(_bc); // Expected size
   48.76 -
   48.77 -    if( _bc == Bytecodes::_wide ) {
   48.78 -      _bc=wide();                           // Handle wide bytecode
   48.79 -    } else if( csize == 0 ) {
   48.80 -      _bc=table(_bc);                       // Handle inline tables
   48.81 -    } else {
   48.82 -      _pc += csize;                         // Bump PC past bytecode
   48.83 +    _pc += csize;                           // Bump PC past bytecode
   48.84 +    if (csize == 0) {
   48.85 +      _bc = next_wide_or_table(_bc);
   48.86      }
   48.87      return check_java(_bc);
   48.88    }
   48.89  
   48.90    bool is_wide() const { return ( _pc == _was_wide ); }
   48.91  
   48.92 +  // Does this instruction contain an index which refes into the CP cache?
   48.93 +  bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
   48.94 +
   48.95 +  int get_index_u1() const {
   48.96 +    return bytecode()->get_index_u1(cur_bc_raw());
   48.97 +  }
   48.98 +
   48.99    // Get a byte index following this bytecode.
  48.100    // If prefixed with a wide bytecode, get a wide index.
  48.101    int get_index() const {
  48.102 -    assert_index_size(is_wide() ? 2 : 1);
  48.103      return (_pc == _was_wide)   // was widened?
  48.104 -      ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
  48.105 -      : _bc_start[1];           // no, return narrow index
  48.106 +      ? get_index_u2(true)      // yes, return wide index
  48.107 +      : get_index_u1();         // no, return narrow index
  48.108    }
  48.109  
  48.110 -  // Get 2-byte index (getfield/putstatic/etc)
  48.111 -  int get_index_big() const {
  48.112 -    assert_index_size(2);
  48.113 -    return Bytes::get_Java_u2(_bc_start+1);
  48.114 +  // Get 2-byte index (byte swapping depending on which bytecode)
  48.115 +  int get_index_u2(bool is_wide = false) const {
  48.116 +    return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
  48.117    }
  48.118  
  48.119 -  // Get 2-byte index (or 4-byte, for invokedynamic)
  48.120 -  int get_index_int() const {
  48.121 -    return has_giant_index() ? get_index_giant() : get_index_big();
  48.122 +  // Get 2-byte index in native byte order.  (Rewriter::rewrite makes these.)
  48.123 +  int get_index_u2_cpcache() const {
  48.124 +    return bytecode()->get_index_u2_cpcache(cur_bc_raw());
  48.125    }
  48.126  
  48.127    // Get 4-byte index, for invokedynamic.
  48.128 -  int get_index_giant() const {
  48.129 -    assert_index_size(4);
  48.130 -    return Bytes::get_native_u4(_bc_start+1);
  48.131 +  int get_index_u4() const {
  48.132 +    return bytecode()->get_index_u4(cur_bc_raw());
  48.133    }
  48.134  
  48.135 -  bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
  48.136 +  bool has_index_u4() const {
  48.137 +    return bytecode()->has_index_u4(cur_bc_raw());
  48.138 +  }
  48.139  
  48.140    // Get dimensions byte (multinewarray)
  48.141    int get_dimensions() const { return *(unsigned char*)(_pc-1); }
  48.142  
  48.143    // Sign-extended index byte/short, no widening
  48.144 -  int get_byte() const { return (int8_t)(_pc[-1]); }
  48.145 -  int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
  48.146 -  int get_long() const  { return (int32_t)Bytes::get_Java_u4(_pc-4); }
  48.147 +  int get_constant_u1()                     const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
  48.148 +  int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
  48.149  
  48.150    // Get a byte signed constant for "iinc".  Invalid for other bytecodes.
  48.151    // If prefixed with a wide bytecode, get a wide constant
  48.152 -  int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();}
  48.153 +  int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
  48.154  
  48.155    // 2-byte branch offset from current pc
  48.156 -  int get_dest( ) const {
  48.157 -    assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1,  "get_dest called with bad bytecode" );
  48.158 -    return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
  48.159 +  int get_dest() const {
  48.160 +    return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
  48.161    }
  48.162  
  48.163    // 2-byte branch offset from next pc
  48.164 -  int next_get_dest( ) const {
  48.165 -    address next_bc_start = _pc;
  48.166 -    assert( _pc < _end, "" );
  48.167 -    Bytecodes::Code next_bc = (Bytecodes::Code)*_pc;
  48.168 -    assert( next_bc != Bytecodes::_wide, "");
  48.169 -    int next_csize = Bytecodes::length_for(next_bc);
  48.170 -    assert( next_csize != 0, "" );
  48.171 -    assert( next_bc <= Bytecodes::_jsr_w, "");
  48.172 -    address next_pc = _pc + next_csize;
  48.173 -    assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1,  "next_get_dest called with bad bytecode" );
  48.174 -    return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
  48.175 +  int next_get_dest() const {
  48.176 +    assert(_pc < _end, "");
  48.177 +    return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
  48.178    }
  48.179  
  48.180    // 4-byte branch offset from current pc
  48.181 -  int get_far_dest( ) const {
  48.182 -    assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" );
  48.183 -    return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
  48.184 +  int get_far_dest() const {
  48.185 +    return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
  48.186    }
  48.187  
  48.188    // For a lookup or switch table, return target destination
  48.189 @@ -234,22 +240,6 @@
  48.190  
  48.191    ciCPCache*  get_cpcache();
  48.192    ciCallSite* get_call_site();
  48.193 -
  48.194 - private:
  48.195 -  void assert_index_size(int required_size) const {
  48.196 -#ifdef ASSERT
  48.197 -    int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
  48.198 -    if (isize == 2 &&  cur_bc() == Bytecodes::_iinc)
  48.199 -      isize = 1;
  48.200 -    else if (isize <= 2)
  48.201 -      ;                         // no change
  48.202 -    else if (has_giant_index())
  48.203 -      isize = 4;
  48.204 -    else
  48.205 -      isize = 2;
  48.206 -    assert(isize = required_size, "wrong index size");
  48.207 -#endif
  48.208 -  }
  48.209  };
  48.210  
  48.211  
    49.1 --- a/src/share/vm/ci/ciTypeFlow.cpp	Thu May 20 08:32:11 2010 -0700
    49.2 +++ b/src/share/vm/ci/ciTypeFlow.cpp	Mon May 24 14:15:14 2010 -0700
    49.3 @@ -1,5 +1,5 @@
    49.4  /*
    49.5 - * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
    49.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
    49.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.8   *
    49.9   * This code is free software; you can redistribute it and/or modify it
   49.10 @@ -2132,6 +2132,7 @@
   49.11    if (!Bytecodes::can_trap(str.cur_bc()))  return false;
   49.12  
   49.13    switch (str.cur_bc()) {
   49.14 +    // %%% FIXME: ldc of Class can generate an exception
   49.15      case Bytecodes::_ldc:
   49.16      case Bytecodes::_ldc_w:
   49.17      case Bytecodes::_ldc2_w:
    50.1 --- a/src/share/vm/classfile/verifier.cpp	Thu May 20 08:32:11 2010 -0700
    50.2 +++ b/src/share/vm/classfile/verifier.cpp	Mon May 24 14:15:14 2010 -0700
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
    50.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -410,13 +410,13 @@
   50.11            no_control_flow = false; break;
   50.12          case Bytecodes::_ldc :
   50.13            verify_ldc(
   50.14 -            opcode, bcs.get_index(), &current_frame,
   50.15 +            opcode, bcs.get_index_u1(), &current_frame,
   50.16              cp, bci, CHECK_VERIFY(this));
   50.17            no_control_flow = false; break;
   50.18          case Bytecodes::_ldc_w :
   50.19          case Bytecodes::_ldc2_w :
   50.20            verify_ldc(
   50.21 -            opcode, bcs.get_index_big(), &current_frame,
   50.22 +            opcode, bcs.get_index_u2(), &current_frame,
   50.23              cp, bci, CHECK_VERIFY(this));
   50.24            no_control_flow = false; break;
   50.25          case Bytecodes::_iload :
   50.26 @@ -1182,7 +1182,7 @@
   50.27            no_control_flow = false; break;
   50.28          case Bytecodes::_new :
   50.29          {
   50.30 -          index = bcs.get_index_big();
   50.31 +          index = bcs.get_index_u2();
   50.32            verify_cp_class_type(index, cp, CHECK_VERIFY(this));
   50.33            VerificationType new_class_type =
   50.34              cp_index_to_type(index, cp, CHECK_VERIFY(this));
   50.35 @@ -1202,7 +1202,7 @@
   50.36            no_control_flow = false; break;
   50.37          case Bytecodes::_anewarray :
   50.38            verify_anewarray(
   50.39 -            bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
   50.40 +            bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
   50.41            no_control_flow = false; break;
   50.42          case Bytecodes::_arraylength :
   50.43            type = current_frame.pop_stack(
   50.44 @@ -1215,7 +1215,7 @@
   50.45            no_control_flow = false; break;
   50.46          case Bytecodes::_checkcast :
   50.47          {
   50.48 -          index = bcs.get_index_big();
   50.49 +          index = bcs.get_index_u2();
   50.50            verify_cp_class_type(index, cp, CHECK_VERIFY(this));
   50.51            current_frame.pop_stack(
   50.52              VerificationType::reference_check(), CHECK_VERIFY(this));
   50.53 @@ -1225,7 +1225,7 @@
   50.54            no_control_flow = false; break;
   50.55          }
   50.56          case Bytecodes::_instanceof : {
   50.57 -          index = bcs.get_index_big();
   50.58 +          index = bcs.get_index_u2();
   50.59            verify_cp_class_type(index, cp, CHECK_VERIFY(this));
   50.60            current_frame.pop_stack(
   50.61              VerificationType::reference_check(), CHECK_VERIFY(this));
   50.62 @@ -1240,7 +1240,7 @@
   50.63            no_control_flow = false; break;
   50.64          case Bytecodes::_multianewarray :
   50.65          {
   50.66 -          index = bcs.get_index_big();
   50.67 +          index = bcs.get_index_u2();
   50.68            u2 dim = *(bcs.bcp()+3);
   50.69            verify_cp_class_type(index, cp, CHECK_VERIFY(this));
   50.70            VerificationType new_array_type =
   50.71 @@ -1299,7 +1299,7 @@
   50.72    while (!bcs.is_last_bytecode()) {
   50.73      if (bcs.raw_next() != Bytecodes::_illegal) {
   50.74        int bci = bcs.bci();
   50.75 -      if (bcs.code() == Bytecodes::_new) {
   50.76 +      if (bcs.raw_code() == Bytecodes::_new) {
   50.77          code_data[bci] = NEW_OFFSET;
   50.78        } else {
   50.79          code_data[bci] = BYTECODE_OFFSET;
   50.80 @@ -1654,7 +1654,7 @@
   50.81    int keys, delta;
   50.82    current_frame->pop_stack(
   50.83      VerificationType::integer_type(), CHECK_VERIFY(this));
   50.84 -  if (bcs->code() == Bytecodes::_tableswitch) {
   50.85 +  if (bcs->raw_code() == Bytecodes::_tableswitch) {
   50.86      jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
   50.87      jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
   50.88      if (low > high) {
   50.89 @@ -1710,7 +1710,7 @@
   50.90                                                StackMapFrame* current_frame,
   50.91                                                constantPoolHandle cp,
   50.92                                                TRAPS) {
   50.93 -  u2 index = bcs->get_index_big();
   50.94 +  u2 index = bcs->get_index_u2();
   50.95    verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
   50.96  
   50.97    // Get field name and signature
   50.98 @@ -1750,7 +1750,7 @@
   50.99      &sig_stream, field_type, CHECK_VERIFY(this));
  50.100    u2 bci = bcs->bci();
  50.101    bool is_assignable;
  50.102 -  switch (bcs->code()) {
  50.103 +  switch (bcs->raw_code()) {
  50.104      case Bytecodes::_getstatic: {
  50.105        for (int i = 0; i < n; i++) {
  50.106          current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
  50.107 @@ -1870,7 +1870,7 @@
  50.108          ref_class_type.name(), CHECK_VERIFY(this));
  50.109        methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
  50.110          vmSymbols::object_initializer_name(),
  50.111 -        cp->signature_ref_at(bcs->get_index_big()));
  50.112 +        cp->signature_ref_at(bcs->get_index_u2()));
  50.113        instanceKlassHandle mh(THREAD, m->method_holder());
  50.114        if (m->is_protected() && !mh->is_same_class_package(_klass())) {
  50.115          bool assignable = current_type().is_assignable_from(
  50.116 @@ -1893,8 +1893,8 @@
  50.117      bool *this_uninit, VerificationType return_type,
  50.118      constantPoolHandle cp, TRAPS) {
  50.119    // Make sure the constant pool item is the right type
  50.120 -  u2 index = bcs->get_index_big();
  50.121 -  Bytecodes::Code opcode = bcs->code();
  50.122 +  u2 index = bcs->get_index_u2();
  50.123 +  Bytecodes::Code opcode = bcs->raw_code();
  50.124    unsigned int types = (opcode == Bytecodes::_invokeinterface
  50.125                                  ? 1 << JVM_CONSTANT_InterfaceMethodref
  50.126                        : opcode == Bytecodes::_invokedynamic
    51.1 --- a/src/share/vm/code/codeBlob.cpp	Thu May 20 08:32:11 2010 -0700
    51.2 +++ b/src/share/vm/code/codeBlob.cpp	Mon May 24 14:15:14 2010 -0700
    51.3 @@ -66,8 +66,6 @@
    51.4    _relocation_size       = locs_size;
    51.5    _instructions_offset   = align_code_offset(header_size + locs_size);
    51.6    _data_offset           = size;
    51.7 -  _oops_offset           = size;
    51.8 -  _oops_length           =  0;
    51.9    _frame_size            =  0;
   51.10    set_oop_maps(NULL);
   51.11  }
   51.12 @@ -94,9 +92,6 @@
   51.13    _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
   51.14    _instructions_offset   = align_code_offset(header_size + _relocation_size);
   51.15    _data_offset           = _instructions_offset + round_to(cb->total_code_size(), oopSize);
   51.16 -  _oops_offset           = _size - round_to(cb->total_oop_size(), oopSize);
   51.17 -  _oops_length           = 0;  // temporary, until the copy_oops handshake
   51.18 -  assert(_oops_offset >=   _data_offset, "codeBlob is too small");
   51.19    assert(_data_offset <= size, "codeBlob is too small");
   51.20  
   51.21    cb->copy_code_and_locs_to(this);
   51.22 @@ -131,99 +126,6 @@
   51.23  }
   51.24  
   51.25  
   51.26 -// Promote one word from an assembly-time handle to a live embedded oop.
   51.27 -inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
   51.28 -  if (handle == NULL ||
   51.29 -      // As a special case, IC oops are initialized to 1 or -1.
   51.30 -      handle == (jobject) Universe::non_oop_word()) {
   51.31 -    (*dest) = (oop)handle;
   51.32 -  } else {
   51.33 -    (*dest) = JNIHandles::resolve_non_null(handle);
   51.34 -  }
   51.35 -}
   51.36 -
   51.37 -
   51.38 -void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
   51.39 -  assert(_oops_length == 0, "do this handshake just once, please");
   51.40 -  int length = array->length();
   51.41 -  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
   51.42 -  oop* dest = oops_begin();
   51.43 -  for (int index = 0 ; index < length; index++) {
   51.44 -    initialize_immediate_oop(&dest[index], array->at(index));
   51.45 -  }
   51.46 -  _oops_length = length;
   51.47 -
   51.48 -  // Now we can fix up all the oops in the code.
   51.49 -  // We need to do this in the code because
   51.50 -  // the assembler uses jobjects as placeholders.
   51.51 -  // The code and relocations have already been
   51.52 -  // initialized by the CodeBlob constructor,
   51.53 -  // so it is valid even at this early point to
   51.54 -  // iterate over relocations and patch the code.
   51.55 -  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
   51.56 -}
   51.57 -
   51.58 -
   51.59 -relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
   51.60 -  RelocIterator iter(this, pc, pc+1);
   51.61 -  while (iter.next()) {
   51.62 -    return (relocInfo::relocType) iter.type();
   51.63 -  }
   51.64 -  // No relocation info found for pc
   51.65 -  ShouldNotReachHere();
   51.66 -  return relocInfo::none; // dummy return value
   51.67 -}
   51.68 -
   51.69 -
   51.70 -bool CodeBlob::is_at_poll_return(address pc) {
   51.71 -  RelocIterator iter(this, pc, pc+1);
   51.72 -  while (iter.next()) {
   51.73 -    if (iter.type() == relocInfo::poll_return_type)
   51.74 -      return true;
   51.75 -  }
   51.76 -  return false;
   51.77 -}
   51.78 -
   51.79 -
   51.80 -bool CodeBlob::is_at_poll_or_poll_return(address pc) {
   51.81 -  RelocIterator iter(this, pc, pc+1);
   51.82 -  while (iter.next()) {
   51.83 -    relocInfo::relocType t = iter.type();
   51.84 -    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
   51.85 -      return true;
   51.86 -  }
   51.87 -  return false;
   51.88 -}
   51.89 -
   51.90 -
   51.91 -void CodeBlob::fix_oop_relocations(address begin, address end,
   51.92 -                                   bool initialize_immediates) {
   51.93 -  // re-patch all oop-bearing instructions, just in case some oops moved
   51.94 -  RelocIterator iter(this, begin, end);
   51.95 -  while (iter.next()) {
   51.96 -    if (iter.type() == relocInfo::oop_type) {
   51.97 -      oop_Relocation* reloc = iter.oop_reloc();
   51.98 -      if (initialize_immediates && reloc->oop_is_immediate()) {
   51.99 -        oop* dest = reloc->oop_addr();
  51.100 -        initialize_immediate_oop(dest, (jobject) *dest);
  51.101 -      }
  51.102 -      // Refresh the oop-related bits of this instruction.
  51.103 -      reloc->fix_oop_relocation();
  51.104 -    }
  51.105 -
  51.106 -    // There must not be any interfering patches or breakpoints.
  51.107 -    assert(!(iter.type() == relocInfo::breakpoint_type
  51.108 -             && iter.breakpoint_reloc()->active()),
  51.109 -           "no active breakpoint");
  51.110 -  }
  51.111 -}
  51.112 -
  51.113 -void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
  51.114 -                            OopClosure* keep_alive,
  51.115 -                            bool unloading_occurred) {
  51.116 -  ShouldNotReachHere();
  51.117 -}
  51.118 -
  51.119  OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  51.120    address pc = return_address ;
  51.121    assert (oop_maps() != NULL, "nope");
    52.1 --- a/src/share/vm/code/codeBlob.hpp	Thu May 20 08:32:11 2010 -0700
    52.2 +++ b/src/share/vm/code/codeBlob.hpp	Mon May 24 14:15:14 2010 -0700
    52.3 @@ -54,17 +54,12 @@
    52.4                                                   // that range. There is a similar range(s) on returns
    52.5                                                   // which we don't detect.
    52.6    int        _data_offset;                       // offset to where data region begins
    52.7 -  int        _oops_offset;                       // offset to where embedded oop table begins (inside data)
    52.8 -  int        _oops_length;                       // number of embedded oops
    52.9    int        _frame_size;                        // size of stack frame
   52.10    OopMapSet* _oop_maps;                          // OopMap for this CodeBlob
   52.11    CodeComments _comments;
   52.12  
   52.13    friend class OopRecorder;
   52.14  
   52.15 -  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
   52.16 -  inline void initialize_immediate_oop(oop* dest, jobject handle);
   52.17 -
   52.18   public:
   52.19    // Returns the space needed for CodeBlob
   52.20    static unsigned int allocation_size(CodeBuffer* cb, int header_size);
   52.21 @@ -115,14 +110,11 @@
   52.22    address    instructions_end() const            { return (address)    header_begin() + _data_offset; }
   52.23    address    data_begin() const                  { return (address)    header_begin() + _data_offset; }
   52.24    address    data_end() const                    { return (address)    header_begin() + _size; }
   52.25 -  oop*       oops_begin() const                  { return (oop*)      (header_begin() + _oops_offset); }
   52.26 -  oop*       oops_end() const                    { return                oops_begin() + _oops_length; }
   52.27  
   52.28    // Offsets
   52.29    int relocation_offset() const                  { return _header_size; }
   52.30    int instructions_offset() const                { return _instructions_offset; }
   52.31    int data_offset() const                        { return _data_offset; }
   52.32 -  int oops_offset() const                        { return _oops_offset; }
   52.33  
   52.34    // Sizes
   52.35    int size() const                               { return _size; }
   52.36 @@ -130,40 +122,16 @@
   52.37    int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
   52.38    int instructions_size() const                  { return instructions_end() - instructions_begin();  }
   52.39    int data_size() const                          { return data_end() - data_begin(); }
   52.40 -  int oops_size() const                          { return (address) oops_end() - (address) oops_begin(); }
   52.41  
   52.42    // Containment
   52.43    bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end(); }
   52.44    bool relocation_contains(relocInfo* addr) const{ return relocation_begin()   <= addr && addr < relocation_end(); }
   52.45    bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
   52.46    bool data_contains(address addr) const         { return data_begin()         <= addr && addr < data_end(); }
   52.47 -  bool oops_contains(oop* addr) const            { return oops_begin()         <= addr && addr < oops_end(); }
   52.48    bool contains(address addr) const              { return instructions_contains(addr); }
   52.49    bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) &&
   52.50                                                            addr >= instructions_begin() + _frame_complete_offset; }
   52.51  
   52.52 -  // Relocation support
   52.53 -  void fix_oop_relocations(address begin, address end) {
   52.54 -    fix_oop_relocations(begin, end, false);
   52.55 -  }
   52.56 -  void fix_oop_relocations() {
   52.57 -    fix_oop_relocations(NULL, NULL, false);
   52.58 -  }
   52.59 -  relocInfo::relocType reloc_type_for_address(address pc);
   52.60 -  bool is_at_poll_return(address pc);
   52.61 -  bool is_at_poll_or_poll_return(address pc);
   52.62 -
   52.63 -  // Support for oops in scopes and relocs:
   52.64 -  // Note: index 0 is reserved for null.
   52.65 -  oop  oop_at(int index) const                   { return index == 0? (oop)NULL: *oop_addr_at(index); }
   52.66 -  oop* oop_addr_at(int index) const{             // for GC
   52.67 -    // relocation indexes are biased by 1 (because 0 is reserved)
   52.68 -    assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
   52.69 -    return &oops_begin()[index-1];
   52.70 -  }
   52.71 -
   52.72 -  void copy_oops(GrowableArray<jobject>* oops);
   52.73 -
   52.74    // CodeCache support: really only used by the nmethods, but in order to get
   52.75    // asserts and certain bookkeeping to work in the CodeCache they are defined
   52.76    // virtual here.
   52.77 @@ -175,12 +143,6 @@
   52.78  
   52.79    // GC support
   52.80    virtual bool is_alive() const                  = 0;
   52.81 -  virtual void do_unloading(BoolObjectClosure* is_alive,
   52.82 -                            OopClosure* keep_alive,
   52.83 -                            bool unloading_occurred);
   52.84 -  virtual void oops_do(OopClosure* f) = 0;
   52.85 -  // (All CodeBlob subtypes other than NMethod currently have
   52.86 -  // an empty oops_do() method.
   52.87  
   52.88    // OopMap for frame
   52.89    OopMapSet* oop_maps() const                    { return _oop_maps; }
   52.90 @@ -245,11 +207,6 @@
   52.91    // GC/Verification support
   52.92    void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }
   52.93    bool is_alive() const                          { return true; }
   52.94 -  void do_unloading(BoolObjectClosure* is_alive,
   52.95 -                    OopClosure* keep_alive,
   52.96 -                    bool unloading_occurred)     { /* do nothing */ }
   52.97 -
   52.98 -  void oops_do(OopClosure* f)                    { /* do nothing*/ }
   52.99  
  52.100    void verify();
  52.101    void print() const                             PRODUCT_RETURN;
  52.102 @@ -334,10 +291,6 @@
  52.103    // GC/Verification support
  52.104    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
  52.105    bool is_alive() const                          { return true; }
  52.106 -  void do_unloading(BoolObjectClosure* is_alive,
  52.107 -                    OopClosure* keep_alive,
  52.108 -                    bool unloading_occurred)     { /* do nothing */ }
  52.109 -  void oops_do(OopClosure* f) { /* do-nothing*/ }
  52.110  
  52.111    void verify();
  52.112    void print() const                             PRODUCT_RETURN;
  52.113 @@ -363,9 +316,6 @@
  52.114     {};
  52.115  
  52.116     bool is_alive() const                         { return true; }
  52.117 -   void do_unloading(BoolObjectClosure* is_alive,
  52.118 -                     OopClosure* keep_alive,
  52.119 -                     bool unloading_occurred)    { /* do-nothing*/ }
  52.120  
  52.121     void verify(); // does nothing
  52.122     void print() const                            PRODUCT_RETURN;
  52.123 @@ -423,9 +373,6 @@
  52.124    // GC for args
  52.125    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
  52.126  
  52.127 -  // Iteration
  52.128 -  void oops_do(OopClosure* f) {}
  52.129 -
  52.130    // Printing
  52.131    void print_value_on(outputStream* st) const PRODUCT_RETURN;
  52.132  
  52.133 @@ -477,9 +424,6 @@
  52.134  
  52.135    // Typing
  52.136    bool is_uncommon_trap_stub() const             { return true; }
  52.137 -
  52.138 -  // Iteration
  52.139 -  void oops_do(OopClosure* f) {}
  52.140  };
  52.141  
  52.142  
  52.143 @@ -512,9 +456,6 @@
  52.144  
  52.145    // Typing
  52.146    bool is_exception_stub() const                 { return true; }
  52.147 -
  52.148 -  // Iteration
  52.149 -  void oops_do(OopClosure* f) {}
  52.150  };
  52.151  #endif // COMPILER2
  52.152  
  52.153 @@ -548,7 +489,4 @@
  52.154  
  52.155    // Typing
  52.156    bool is_safepoint_stub() const                 { return true; }
  52.157 -
  52.158 -  // Iteration
  52.159 -  void oops_do(OopClosure* f) {}
  52.160  };
    53.1 --- a/src/share/vm/code/codeCache.cpp	Thu May 20 08:32:11 2010 -0700
    53.2 +++ b/src/share/vm/code/codeCache.cpp	Mon May 24 14:15:14 2010 -0700
    53.3 @@ -74,12 +74,12 @@
    53.4      total_size       += cb->size();
    53.5      header_size      += cb->header_size();
    53.6      relocation_size  += cb->relocation_size();
    53.7 -    scopes_oop_size  += cb->oops_size();
    53.8      if (cb->is_nmethod()) {
    53.9 -      nmethod *nm = (nmethod*)cb;
   53.10 +      nmethod* nm = cb->as_nmethod_or_null();
   53.11        code_size        += nm->code_size();
   53.12        stub_size        += nm->stub_size();
   53.13  
   53.14 +      scopes_oop_size  += nm->oops_size();
   53.15        scopes_data_size += nm->scopes_data_size();
   53.16        scopes_pcs_size  += nm->scopes_pcs_size();
   53.17      } else {
   53.18 @@ -262,14 +262,14 @@
   53.19  }
   53.20  
   53.21  
   53.22 -// Mark code blobs for unloading if they contain otherwise
   53.23 -// unreachable oops.
   53.24 +// Mark nmethods for unloading if they contain otherwise unreachable
   53.25 +// oops.
   53.26  void CodeCache::do_unloading(BoolObjectClosure* is_alive,
   53.27                               OopClosure* keep_alive,
   53.28                               bool unloading_occurred) {
   53.29    assert_locked_or_safepoint(CodeCache_lock);
   53.30 -  FOR_ALL_ALIVE_BLOBS(cb) {
   53.31 -    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
   53.32 +  FOR_ALL_ALIVE_NMETHODS(nm) {
   53.33 +    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
   53.34    }
   53.35  }
   53.36  
   53.37 @@ -509,9 +509,9 @@
   53.38        if (needs_cache_clean()) {
   53.39          nm->cleanup_inline_caches();
   53.40        }
   53.41 -      debug_only(nm->verify();)
   53.42 +      DEBUG_ONLY(nm->verify());
   53.43 +      nm->fix_oop_relocations();
   53.44      }
   53.45 -    cb->fix_oop_relocations();
   53.46    }
   53.47    set_needs_cache_clean(false);
   53.48    prune_scavenge_root_nmethods();
    54.1 --- a/src/share/vm/code/compiledIC.cpp	Thu May 20 08:32:11 2010 -0700
    54.2 +++ b/src/share/vm/code/compiledIC.cpp	Mon May 24 14:15:14 2010 -0700
    54.3 @@ -1,5 +1,5 @@
    54.4  /*
    54.5 - * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
    54.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    54.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    54.8   *
    54.9   * This code is free software; you can redistribute it and/or modify it
   54.10 @@ -441,11 +441,11 @@
   54.11  }
   54.12  
   54.13  
   54.14 -inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
   54.15 +inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
   54.16     address  first_oop = NULL;
   54.17     // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
   54.18 -   CodeBlob *code1 = code;
   54.19 -   return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
   54.20 +   nmethod* tmp_nm = nm;
   54.21 +   return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
   54.22  }
   54.23  
   54.24  CompiledIC::CompiledIC(NativeCall* ic_call)
    55.1 --- a/src/share/vm/code/nmethod.cpp	Thu May 20 08:32:11 2010 -0700
    55.2 +++ b/src/share/vm/code/nmethod.cpp	Mon May 24 14:15:14 2010 -0700
    55.3 @@ -99,12 +99,12 @@
    55.4      code_size           += nm->code_size();
    55.5      stub_size           += nm->stub_size();
    55.6      consts_size         += nm->consts_size();
    55.7 +    oops_size           += nm->oops_size();
    55.8      scopes_data_size    += nm->scopes_data_size();
    55.9      scopes_pcs_size     += nm->scopes_pcs_size();
   55.10      dependencies_size   += nm->dependencies_size();
   55.11      handler_table_size  += nm->handler_table_size();
   55.12      nul_chk_table_size  += nm->nul_chk_table_size();
   55.13 -    oops_size += nm->oops_size();
   55.14    }
   55.15    void print_nmethod_stats() {
   55.16      if (nmethod_count == 0)  return;
   55.17 @@ -114,12 +114,12 @@
   55.18      if (code_size != 0)           tty->print_cr(" main code      = %d", code_size);
   55.19      if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
   55.20      if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
   55.21 +    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
   55.22      if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
   55.23      if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
   55.24      if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
   55.25      if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
   55.26      if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
   55.27 -    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
   55.28    }
   55.29  
   55.30    int native_nmethod_count;
   55.31 @@ -600,7 +600,8 @@
   55.32  #endif // def HAVE_DTRACE_H
   55.33      _stub_offset             = data_offset();
   55.34      _consts_offset           = data_offset();
   55.35 -    _scopes_data_offset      = data_offset();
   55.36 +    _oops_offset             = data_offset();
   55.37 +    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
   55.38      _scopes_pcs_offset       = _scopes_data_offset;
   55.39      _dependencies_offset     = _scopes_pcs_offset;
   55.40      _handler_table_offset    = _dependencies_offset;
   55.41 @@ -690,7 +691,8 @@
   55.42      _orig_pc_offset          = 0;
   55.43      _stub_offset             = data_offset();
   55.44      _consts_offset           = data_offset();
   55.45 -    _scopes_data_offset      = data_offset();
   55.46 +    _oops_offset             = data_offset();
   55.47 +    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
   55.48      _scopes_pcs_offset       = _scopes_data_offset;
   55.49      _dependencies_offset     = _scopes_pcs_offset;
   55.50      _handler_table_offset    = _dependencies_offset;
   55.51 @@ -805,8 +807,9 @@
   55.52        _unwind_handler_offset   = -1;
   55.53      }
   55.54      _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
   55.55 -    _scopes_data_offset      = data_offset();
   55.56 -    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
   55.57 +    _oops_offset             = data_offset();
   55.58 +    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size (), oopSize);
   55.59 +    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
   55.60      _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
   55.61      _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
   55.62      _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
   55.63 @@ -990,6 +993,79 @@
   55.64  }
   55.65  
   55.66  
   55.67 +// Promote one word from an assembly-time handle to a live embedded oop.
   55.68 +inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
   55.69 +  if (handle == NULL ||
   55.70 +      // As a special case, IC oops are initialized to 1 or -1.
   55.71 +      handle == (jobject) Universe::non_oop_word()) {
   55.72 +    (*dest) = (oop) handle;
   55.73 +  } else {
   55.74 +    (*dest) = JNIHandles::resolve_non_null(handle);
   55.75 +  }
   55.76 +}
   55.77 +
   55.78 +
   55.79 +void nmethod::copy_oops(GrowableArray<jobject>* array) {
   55.80 +  //assert(oops_size() == 0, "do this handshake just once, please");
   55.81 +  int length = array->length();
   55.82 +  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
   55.83 +  oop* dest = oops_begin();
   55.84 +  for (int index = 0 ; index < length; index++) {
   55.85 +    initialize_immediate_oop(&dest[index], array->at(index));
   55.86 +  }
   55.87 +
   55.88 +  // Now we can fix up all the oops in the code.  We need to do this
   55.89 +  // in the code because the assembler uses jobjects as placeholders.
   55.90 +  // The code and relocations have already been initialized by the
   55.91 +  // CodeBlob constructor, so it is valid even at this early point to
   55.92 +  // iterate over relocations and patch the code.
   55.93 +  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
   55.94 +}
   55.95 +
   55.96 +
   55.97 +bool nmethod::is_at_poll_return(address pc) {
   55.98 +  RelocIterator iter(this, pc, pc+1);
   55.99 +  while (iter.next()) {
  55.100 +    if (iter.type() == relocInfo::poll_return_type)
  55.101 +      return true;
  55.102 +  }
  55.103 +  return false;
  55.104 +}
  55.105 +
  55.106 +
  55.107 +bool nmethod::is_at_poll_or_poll_return(address pc) {
  55.108 +  RelocIterator iter(this, pc, pc+1);
  55.109 +  while (iter.next()) {
  55.110 +    relocInfo::relocType t = iter.type();
  55.111 +    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
  55.112 +      return true;
  55.113 +  }
  55.114 +  return false;
  55.115 +}
  55.116 +
  55.117 +
  55.118 +void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  55.119 +  // re-patch all oop-bearing instructions, just in case some oops moved
  55.120 +  RelocIterator iter(this, begin, end);
  55.121 +  while (iter.next()) {
  55.122 +    if (iter.type() == relocInfo::oop_type) {
  55.123 +      oop_Relocation* reloc = iter.oop_reloc();
  55.124 +      if (initialize_immediates && reloc->oop_is_immediate()) {
  55.125 +        oop* dest = reloc->oop_addr();
  55.126 +        initialize_immediate_oop(dest, (jobject) *dest);
  55.127 +      }
  55.128 +      // Refresh the oop-related bits of this instruction.
  55.129 +      reloc->fix_oop_relocation();
  55.130 +    }
  55.131 +
  55.132 +    // There must not be any interfering patches or breakpoints.
  55.133 +    assert(!(iter.type() == relocInfo::breakpoint_type
  55.134 +             && iter.breakpoint_reloc()->active()),
  55.135 +           "no active breakpoint");
  55.136 +  }
  55.137 +}
  55.138 +
  55.139 +
  55.140  ScopeDesc* nmethod::scope_desc_at(address pc) {
  55.141    PcDesc* pd = pc_desc_at(pc);
  55.142    guarantee(pd != NULL, "scope must be present");
  55.143 @@ -2282,6 +2358,10 @@
  55.144                                                consts_begin(),
  55.145                                                consts_end(),
  55.146                                                consts_size());
  55.147 +  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  55.148 +                                              oops_begin(),
  55.149 +                                              oops_end(),
  55.150 +                                              oops_size());
  55.151    if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  55.152                                                scopes_data_begin(),
  55.153                                                scopes_data_end(),
    56.1 --- a/src/share/vm/code/nmethod.hpp	Thu May 20 08:32:11 2010 -0700
    56.2 +++ b/src/share/vm/code/nmethod.hpp	Mon May 24 14:15:14 2010 -0700
    56.3 @@ -105,6 +105,7 @@
    56.4  //  [Relocation]
    56.5  //  - relocation information
    56.6  //  - constant part          (doubles, longs and floats used in nmethod)
    56.7 +//  - oop table
    56.8  //  [Code]
    56.9  //  - code body
   56.10  //  - exception handler
   56.11 @@ -161,6 +162,7 @@
   56.12  #endif // def HAVE_DTRACE_H
   56.13    int _stub_offset;
   56.14    int _consts_offset;
   56.15 +  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
   56.16    int _scopes_data_offset;
   56.17    int _scopes_pcs_offset;
   56.18    int _dependencies_offset;
   56.19 @@ -347,7 +349,10 @@
   56.20    address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
   56.21    address stub_end              () const          { return           header_begin() + _consts_offset        ; }
   56.22    address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
   56.23 -  address consts_end            () const          { return           header_begin() + _scopes_data_offset   ; }
   56.24 +  address consts_end            () const          { return           header_begin() + _oops_offset          ; }
   56.25 +  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
   56.26 +  oop*    oops_end              () const          { return (oop*)   (header_begin() + _scopes_data_offset)  ; }
   56.27 +
   56.28    address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
   56.29    address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
   56.30    PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
   56.31 @@ -359,20 +364,24 @@
   56.32    address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
   56.33    address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
   56.34  
   56.35 -  int code_size         () const                  { return      code_end         () -      code_begin         (); }
   56.36 -  int stub_size         () const                  { return      stub_end         () -      stub_begin         (); }
   56.37 -  int consts_size       () const                  { return      consts_end       () -      consts_begin       (); }
   56.38 -  int scopes_data_size  () const                  { return      scopes_data_end  () -      scopes_data_begin  (); }
   56.39 -  int scopes_pcs_size   () const                  { return (intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   (); }
   56.40 -  int dependencies_size () const                  { return      dependencies_end () -      dependencies_begin (); }
   56.41 -  int handler_table_size() const                  { return      handler_table_end() -      handler_table_begin(); }
   56.42 -  int nul_chk_table_size() const                  { return      nul_chk_table_end() -      nul_chk_table_begin(); }
   56.43 +  // Sizes
   56.44 +  int code_size         () const                  { return            code_end         () -            code_begin         (); }
   56.45 +  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
   56.46 +  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
   56.47 +  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
   56.48 +  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
   56.49 +  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
   56.50 +  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
   56.51 +  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
   56.52 +  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }
   56.53  
   56.54    int total_size        () const;
   56.55  
   56.56 +  // Containment
   56.57    bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
   56.58    bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
   56.59    bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
   56.60 +  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
   56.61    bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
   56.62    bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
   56.63    bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
   56.64 @@ -431,6 +440,29 @@
   56.65    int   version() const                           { return flags.version; }
   56.66    void  set_version(int v);
   56.67  
   56.68 +  // Support for oops in scopes and relocs:
   56.69 +  // Note: index 0 is reserved for null.
   56.70 +  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
   56.71 +  oop*  oop_addr_at(int index) const {  // for GC
   56.72 +    // relocation indexes are biased by 1 (because 0 is reserved)
   56.73 +    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
   56.74 +    return &oops_begin()[index - 1];
   56.75 +  }
   56.76 +
   56.77 +  void copy_oops(GrowableArray<jobject>* oops);
   56.78 +
   56.79 +  // Relocation support
   56.80 +private:
   56.81 +  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
   56.82 +  inline void initialize_immediate_oop(oop* dest, jobject handle);
   56.83 +
   56.84 +public:
   56.85 +  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
   56.86 +  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
   56.87 +
   56.88 +  bool is_at_poll_return(address pc);
   56.89 +  bool is_at_poll_or_poll_return(address pc);
   56.90 +
   56.91    // Non-perm oop support
   56.92    bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
   56.93   protected:
   56.94 @@ -511,8 +543,8 @@
   56.95  
   56.96    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
   56.97                                       OopClosure* f);
   56.98 -  virtual void oops_do(OopClosure* f) { oops_do(f, false); }
   56.99 -  void         oops_do(OopClosure* f, bool do_strong_roots_only);
  56.100 +  void oops_do(OopClosure* f) { oops_do(f, false); }
  56.101 +  void oops_do(OopClosure* f, bool do_strong_roots_only);
  56.102    bool detect_scavenge_root_oops();
  56.103    void verify_scavenge_root_oops() PRODUCT_RETURN;
  56.104  
    57.1 --- a/src/share/vm/code/oopRecorder.cpp	Thu May 20 08:32:11 2010 -0700
    57.2 +++ b/src/share/vm/code/oopRecorder.cpp	Mon May 24 14:15:14 2010 -0700
    57.3 @@ -1,5 +1,5 @@
    57.4  /*
    57.5 - * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
    57.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    57.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    57.8   *
    57.9   * This code is free software; you can redistribute it and/or modify it
   57.10 @@ -50,10 +50,10 @@
   57.11    return _handles->length() * sizeof(oop);
   57.12  }
   57.13  
   57.14 -void OopRecorder::copy_to(CodeBlob* code) {
   57.15 +void OopRecorder::copy_to(nmethod* nm) {
   57.16    assert(_complete, "must be frozen");
   57.17    maybe_initialize();  // get non-null handles, even if we have no oops
   57.18 -  code->copy_oops(_handles);
   57.19 +  nm->copy_oops(_handles);
   57.20  }
   57.21  
   57.22  void OopRecorder::maybe_initialize() {
    58.1 --- a/src/share/vm/code/oopRecorder.hpp	Thu May 20 08:32:11 2010 -0700
    58.2 +++ b/src/share/vm/code/oopRecorder.hpp	Mon May 24 14:15:14 2010 -0700
    58.3 @@ -1,5 +1,5 @@
    58.4  /*
    58.5 - * Copyright 1998-2005 Sun Microsystems, Inc.  All Rights Reserved.
    58.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    58.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    58.8   *
    58.9   * This code is free software; you can redistribute it and/or modify it
   58.10 @@ -70,8 +70,8 @@
   58.11      return _handles->length() + first_index;
   58.12    }
   58.13  
   58.14 -  // copy the generated oop table to CodeBlob
   58.15 -  void copy_to(CodeBlob* code);  // => code->copy_oops(_handles)
   58.16 +  // copy the generated oop table to nmethod
   58.17 +  void copy_to(nmethod* nm);  // => nm->copy_oops(_handles)
   58.18  
   58.19    bool is_unused() { return _handles == NULL && !_complete; }
   58.20  #ifdef ASSERT
    59.1 --- a/src/share/vm/code/relocInfo.cpp	Thu May 20 08:32:11 2010 -0700
    59.2 +++ b/src/share/vm/code/relocInfo.cpp	Mon May 24 14:15:14 2010 -0700
    59.3 @@ -1,5 +1,5 @@
    59.4  /*
    59.5 - * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
    59.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    59.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.8   *
    59.9   * This code is free software; you can redistribute it and/or modify it
   59.10 @@ -115,24 +115,25 @@
   59.11  // ----------------------------------------------------------------------------------------------------
   59.12  // Implementation of RelocIterator
   59.13  
   59.14 -void RelocIterator::initialize(CodeBlob* cb, address begin, address limit) {
   59.15 +void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
   59.16    initialize_misc();
   59.17  
   59.18 -  if (cb == NULL && begin != NULL) {
   59.19 -    // allow CodeBlob to be deduced from beginning address
   59.20 -    cb = CodeCache::find_blob(begin);
   59.21 +  if (nm == NULL && begin != NULL) {
   59.22 +    // allow nmethod to be deduced from beginning address
   59.23 +    CodeBlob* cb = CodeCache::find_blob(begin);
   59.24 +    nm = cb->as_nmethod_or_null();
   59.25    }
   59.26 -  assert(cb != NULL, "must be able to deduce nmethod from other arguments");
   59.27 +  assert(nm != NULL, "must be able to deduce nmethod from other arguments");
   59.28  
   59.29 -  _code    = cb;
   59.30 -  _current = cb->relocation_begin()-1;
   59.31 -  _end     = cb->relocation_end();
   59.32 -  _addr    = (address) cb->instructions_begin();
   59.33 +  _code    = nm;
   59.34 +  _current = nm->relocation_begin() - 1;
   59.35 +  _end     = nm->relocation_end();
   59.36 +  _addr    = (address) nm->instructions_begin();
   59.37  
   59.38    assert(!has_current(), "just checking");
   59.39 -  address code_end = cb->instructions_end();
   59.40 +  address code_end = nm->instructions_end();
   59.41  
   59.42 -  assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds");
   59.43 +  assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
   59.44   // FIX THIS  assert(limit == NULL || limit <= code_end,     "in bounds");
   59.45    set_limits(begin, limit);
   59.46  }
   59.47 @@ -754,7 +755,7 @@
   59.48      // oop is stored in the code stream
   59.49      return (oop*) pd_address_in_code();
   59.50    } else {
   59.51 -    // oop is stored in table at CodeBlob::oops_begin
   59.52 +    // oop is stored in table at nmethod::oops_begin
   59.53      return code()->oop_addr_at(n);
   59.54    }
   59.55  }
   59.56 @@ -776,26 +777,28 @@
   59.57  }
   59.58  
   59.59  
   59.60 -RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop,
   59.61 +RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
   59.62                                                  oop* &oop_addr, bool *is_optimized) {
   59.63    assert(ic_call != NULL, "ic_call address must be set");
   59.64    assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
   59.65 -  if (code == NULL) {
   59.66 +  if (nm == NULL) {
   59.67 +    CodeBlob* code;
   59.68      if (ic_call != NULL) {
   59.69        code = CodeCache::find_blob(ic_call);
   59.70      } else if (first_oop != NULL) {
   59.71        code = CodeCache::find_blob(first_oop);
   59.72      }
   59.73 -    assert(code != NULL, "address to parse must be in CodeBlob");
   59.74 +    nm = code->as_nmethod_or_null();
   59.75 +    assert(nm != NULL, "address to parse must be in nmethod");
   59.76    }
   59.77 -  assert(ic_call   == NULL || code->contains(ic_call),   "must be in CodeBlob");
   59.78 -  assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob");
   59.79 +  assert(ic_call   == NULL || nm->contains(ic_call),   "must be in nmethod");
   59.80 +  assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
   59.81  
   59.82    address oop_limit = NULL;
   59.83  
   59.84    if (ic_call != NULL) {
   59.85      // search for the ic_call at the given address
   59.86 -    RelocIterator iter(code, ic_call, ic_call+1);
   59.87 +    RelocIterator iter(nm, ic_call, ic_call+1);
   59.88      bool ret = iter.next();
   59.89      assert(ret == true, "relocInfo must exist at this address");
   59.90      assert(iter.addr() == ic_call, "must find ic_call");
   59.91 @@ -814,7 +817,7 @@
   59.92    }
   59.93  
   59.94    // search for the first_oop, to get its oop_addr
   59.95 -  RelocIterator all_oops(code, first_oop);
   59.96 +  RelocIterator all_oops(nm, first_oop);
   59.97    RelocIterator iter = all_oops;
   59.98    iter.set_limit(first_oop+1);
   59.99    bool found_oop = false;
  59.100 @@ -842,7 +845,7 @@
  59.101        }
  59.102      }
  59.103      guarantee(!did_reset, "cannot find ic_call");
  59.104 -    iter = RelocIterator(code); // search the whole CodeBlob
  59.105 +    iter = RelocIterator(nm); // search the whole nmethod
  59.106      did_reset = true;
  59.107    }
  59.108  
  59.109 @@ -1175,9 +1178,9 @@
  59.110  
  59.111  // For the debugger:
  59.112  extern "C"
  59.113 -void print_blob_locs(CodeBlob* cb) {
  59.114 -  cb->print();
  59.115 -  RelocIterator iter(cb);
  59.116 +void print_blob_locs(nmethod* nm) {
  59.117 +  nm->print();
  59.118 +  RelocIterator iter(nm);
  59.119    iter.print();
  59.120  }
  59.121  extern "C"
    60.1 --- a/src/share/vm/code/relocInfo.hpp	Thu May 20 08:32:11 2010 -0700
    60.2 +++ b/src/share/vm/code/relocInfo.hpp	Mon May 24 14:15:14 2010 -0700
    60.3 @@ -512,7 +512,7 @@
    60.4    address    _limit;   // stop producing relocations after this _addr
    60.5    relocInfo* _current; // the current relocation information
    60.6    relocInfo* _end;     // end marker; we're done iterating when _current == _end
    60.7 -  CodeBlob*  _code;    // compiled method containing _addr
    60.8 +  nmethod*   _code;    // compiled method containing _addr
    60.9    address    _addr;    // instruction to which the relocation applies
   60.10    short      _databuf; // spare buffer for compressed data
   60.11    short*     _data;    // pointer to the relocation's data
   60.12 @@ -549,7 +549,7 @@
   60.13  
   60.14    address compute_section_start(int n) const;  // out-of-line helper
   60.15  
   60.16 -  void initialize(CodeBlob* nm, address begin, address limit);
   60.17 +  void initialize(nmethod* nm, address begin, address limit);
   60.18  
   60.19    friend class PatchingRelocIterator;
   60.20    // make an uninitialized one, for PatchingRelocIterator:
   60.21 @@ -557,7 +557,7 @@
   60.22  
   60.23   public:
   60.24    // constructor
   60.25 -  RelocIterator(CodeBlob* cb,    address begin = NULL, address limit = NULL);
   60.26 +  RelocIterator(nmethod* nm,     address begin = NULL, address limit = NULL);
   60.27    RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
   60.28  
   60.29    // get next reloc info, return !eos
   60.30 @@ -592,7 +592,7 @@
   60.31    relocType    type()         const { return current()->type(); }
   60.32    int          format()       const { return (relocInfo::have_format) ? current()->format() : 0; }
   60.33    address      addr()         const { return _addr; }
   60.34 -  CodeBlob*    code()         const { return _code; }
   60.35 +  nmethod*     code()         const { return _code; }
   60.36    short*       data()         const { return _data; }
   60.37    int          datalen()      const { return _datalen; }
   60.38    bool     has_current()      const { return _datalen >= 0; }
   60.39 @@ -790,9 +790,9 @@
   60.40  
   60.41   public:
   60.42    // accessors which only make sense for a bound Relocation
   60.43 -  address   addr()         const { return binding()->addr(); }
   60.44 -  CodeBlob* code()         const { return binding()->code(); }
   60.45 -  bool      addr_in_const() const { return binding()->addr_in_const(); }
   60.46 +  address  addr()         const { return binding()->addr(); }
   60.47 +  nmethod* code()         const { return binding()->code(); }
   60.48 +  bool     addr_in_const() const { return binding()->addr_in_const(); }
   60.49   protected:
   60.50    short*   data()         const { return binding()->data(); }
   60.51    int      datalen()      const { return binding()->datalen(); }
   60.52 @@ -982,12 +982,12 @@
   60.53  
   60.54    // Figure out where an ic_call is hiding, given a set-oop or call.
   60.55    // Either ic_call or first_oop must be non-null; the other is deduced.
   60.56 -  // Code if non-NULL must be the CodeBlob, else it is deduced.
   60.57 +  // Code if non-NULL must be the nmethod, else it is deduced.
   60.58    // The address of the patchable oop is also deduced.
   60.59    // The returned iterator will enumerate over the oops and the ic_call,
   60.60    // as well as any other relocations that happen to be in that span of code.
   60.61    // Recognize relevant set_oops with:  oop_reloc()->oop_addr() == oop_addr.
   60.62 -  static RelocIterator parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
   60.63 +  static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
   60.64  };
   60.65  
   60.66  
   60.67 @@ -1304,8 +1304,8 @@
   60.68  APPLY_TO_RELOCATIONS(EACH_CASE);
   60.69  #undef EACH_CASE
   60.70  
   60.71 -inline RelocIterator::RelocIterator(CodeBlob* cb, address begin, address limit) {
   60.72 -  initialize(cb, begin, limit);
   60.73 +inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
   60.74 +  initialize(nm, begin, limit);
   60.75  }
   60.76  
   60.77  // if you are going to patch code, you should use this subclass of
   60.78 @@ -1323,8 +1323,8 @@
   60.79    void        operator=(const RelocIterator&);
   60.80  
   60.81   public:
   60.82 -  PatchingRelocIterator(CodeBlob* cb, address begin =NULL, address limit =NULL)
   60.83 -    : RelocIterator(cb, begin, limit)                { prepass();  }
   60.84 +  PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
   60.85 +    : RelocIterator(nm, begin, limit)                { prepass();  }
   60.86  
   60.87    ~PatchingRelocIterator()                           { postpass(); }
   60.88  };
    61.1 --- a/src/share/vm/includeDB_core	Thu May 20 08:32:11 2010 -0700
    61.2 +++ b/src/share/vm/includeDB_core	Mon May 24 14:15:14 2010 -0700
    61.3 @@ -827,6 +827,7 @@
    61.4  ciStreams.cpp                           ciStreams.hpp
    61.5  ciStreams.cpp                           ciUtilities.hpp
    61.6  
    61.7 +ciStreams.hpp                           bytecode.hpp
    61.8  ciStreams.hpp                           ciClassList.hpp
    61.9  ciStreams.hpp                           ciExceptionHandler.hpp
   61.10  ciStreams.hpp                           ciInstanceKlass.hpp
    62.1 --- a/src/share/vm/interpreter/bytecode.cpp	Thu May 20 08:32:11 2010 -0700
    62.2 +++ b/src/share/vm/interpreter/bytecode.cpp	Mon May 24 14:15:14 2010 -0700
    62.3 @@ -1,5 +1,5 @@
    62.4  /*
    62.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    62.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    62.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.8   *
    62.9   * This code is free software; you can redistribute it and/or modify it
   62.10 @@ -26,19 +26,12 @@
   62.11  #include "incls/_bytecode.cpp.incl"
   62.12  
   62.13  // Implementation of Bytecode
   62.14 -// Should eventually get rid of these functions and use ThisRelativeObj methods instead
   62.15  
   62.16 -void Bytecode::set_code(Bytecodes::Code code) {
   62.17 -  Bytecodes::check(code);
   62.18 -  *addr_at(0) = u_char(code);
   62.19 -}
   62.20 -
   62.21 -
   62.22 -bool Bytecode::check_must_rewrite() const {
   62.23 -  assert(Bytecodes::can_rewrite(code()), "post-check only");
   62.24 +bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
   62.25 +  assert(Bytecodes::can_rewrite(code), "post-check only");
   62.26  
   62.27    // Some codes are conditionally rewriting.  Look closely at them.
   62.28 -  switch (code()) {
   62.29 +  switch (code) {
   62.30    case Bytecodes::_aload_0:
   62.31      // Even if RewriteFrequentPairs is turned on,
   62.32      // the _aload_0 code might delay its rewrite until
   62.33 @@ -58,14 +51,85 @@
   62.34  }
   62.35  
   62.36  
   62.37 +#ifdef ASSERT
   62.38 +
   62.39 +void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
   62.40 +  Bytecodes::Code thisbc = Bytecodes::cast(byte_at(0));
   62.41 +  if (thisbc == Bytecodes::_breakpoint)  return;  // let the assertion fail silently
   62.42 +  if (is_wide) {
   62.43 +    assert(thisbc == Bytecodes::_wide, "expected a wide instruction");
   62.44 +    thisbc = Bytecodes::cast(byte_at(1));
   62.45 +    if (thisbc == Bytecodes::_breakpoint)  return;
   62.46 +  }
   62.47 +  int thisflags = Bytecodes::flags(testbc, is_wide) & Bytecodes::_all_fmt_bits;
   62.48 +  int testflags = Bytecodes::flags(thisbc, is_wide) & Bytecodes::_all_fmt_bits;
   62.49 +  if (thisflags != testflags)
   62.50 +    tty->print_cr("assert_same_format_as(%d) failed on bc=%d%s; %d != %d",
   62.51 +                  (int)testbc, (int)thisbc, (is_wide?"/wide":""), testflags, thisflags);
   62.52 +  assert(thisflags == testflags, "expected format");
   62.53 +}
   62.54 +
   62.55 +void Bytecode::assert_index_size(int size, Bytecodes::Code bc, bool is_wide) {
   62.56 +  int have_fmt = (Bytecodes::flags(bc, is_wide)
   62.57 +                  & (Bytecodes::_fmt_has_u2 | Bytecodes::_fmt_has_u4 |
   62.58 +                     Bytecodes::_fmt_not_simple |
   62.59 +                     // Not an offset field:
   62.60 +                     Bytecodes::_fmt_has_o));
   62.61 +  int need_fmt = -1;
   62.62 +  switch (size) {
   62.63 +  case 1: need_fmt = 0;                      break;
   62.64 +  case 2: need_fmt = Bytecodes::_fmt_has_u2; break;
   62.65 +  case 4: need_fmt = Bytecodes::_fmt_has_u4; break;
   62.66 +  }
   62.67 +  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
   62.68 +  if (have_fmt != need_fmt) {
   62.69 +    tty->print_cr("assert_index_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
   62.70 +    assert(have_fmt == need_fmt, "assert_index_size");
   62.71 +  }
   62.72 +}
   62.73 +
   62.74 +void Bytecode::assert_offset_size(int size, Bytecodes::Code bc, bool is_wide) {
   62.75 +  int have_fmt = Bytecodes::flags(bc, is_wide) & Bytecodes::_all_fmt_bits;
   62.76 +  int need_fmt = -1;
   62.77 +  switch (size) {
   62.78 +  case 2: need_fmt = Bytecodes::_fmt_bo2; break;
   62.79 +  case 4: need_fmt = Bytecodes::_fmt_bo4; break;
   62.80 +  }
   62.81 +  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
   62.82 +  if (have_fmt != need_fmt) {
   62.83 +    tty->print_cr("assert_offset_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
   62.84 +    assert(have_fmt == need_fmt, "assert_offset_size");
   62.85 +  }
   62.86 +}
   62.87 +
   62.88 +void Bytecode::assert_constant_size(int size, int where, Bytecodes::Code bc, bool is_wide) {
   62.89 +  int have_fmt = Bytecodes::flags(bc, is_wide) & (Bytecodes::_all_fmt_bits
   62.90 +                                                  // Ignore any 'i' field (for iinc):
   62.91 +                                                  & ~Bytecodes::_fmt_has_i);
   62.92 +  int need_fmt = -1;
   62.93 +  switch (size) {
   62.94 +  case 1: need_fmt = Bytecodes::_fmt_bc;                          break;
   62.95 +  case 2: need_fmt = Bytecodes::_fmt_bc | Bytecodes::_fmt_has_u2; break;
   62.96 +  }
   62.97 +  if (is_wide)  need_fmt |= Bytecodes::_fmt_not_simple;
   62.98 +  int length = is_wide ? Bytecodes::wide_length_for(bc) : Bytecodes::length_for(bc);
   62.99 +  if (have_fmt != need_fmt || where + size != length) {
  62.100 +    tty->print_cr("assert_constant_size %d @%d: bc=%d%s %d != %d", size, where, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
  62.101 +  }
  62.102 +  assert(have_fmt == need_fmt, "assert_constant_size");
  62.103 +  assert(where + size == length, "assert_constant_size oob");
  62.104 +}
  62.105 +
  62.106 +void Bytecode::assert_native_index(Bytecodes::Code bc, bool is_wide) {
  62.107 +  assert((Bytecodes::flags(bc, is_wide) & Bytecodes::_fmt_has_nbo) != 0, "native index");
  62.108 +}
  62.109 +
  62.110 +#endif //ASSERT
  62.111  
  62.112  // Implementation of Bytecode_tableupswitch
  62.113  
  62.114  int Bytecode_tableswitch::dest_offset_at(int i) const {
  62.115 -  address x = aligned_addr_at(1);
  62.116 -  int x2 = aligned_offset(1 + (3 + i)*jintSize);
  62.117 -  int val = java_signed_word_at(x2);
  62.118 -  return java_signed_word_at(aligned_offset(1 + (3 + i)*jintSize));
  62.119 +  return get_Java_u4_at(aligned_offset(1 + (3 + i)*jintSize));
  62.120  }
  62.121  
  62.122  
  62.123 @@ -74,6 +138,7 @@
  62.124  void Bytecode_invoke::verify() const {
  62.125    Bytecodes::Code bc = adjusted_invoke_code();
  62.126    assert(is_valid(), "check invoke");
  62.127 +  assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
  62.128  }
  62.129  
  62.130  
  62.131 @@ -116,27 +181,12 @@
  62.132  int Bytecode_invoke::index() const {
  62.133    // Note:  Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
  62.134    // at the same time it allocates per-call-site CP cache entries.
  62.135 -  if (has_giant_index())
  62.136 -    return Bytes::get_native_u4(bcp() + 1);
  62.137 +  Bytecodes::Code stdc = Bytecodes::java_code(code());
  62.138 +  Bytecode* invoke = Bytecode_at(bcp());
  62.139 +  if (invoke->has_index_u4(stdc))
  62.140 +    return invoke->get_index_u4(stdc);
  62.141    else
  62.142 -    return Bytes::get_Java_u2(bcp() + 1);
  62.143 -}
  62.144 -
  62.145 -
  62.146 -// Implementation of Bytecode_static
  62.147 -
  62.148 -void Bytecode_static::verify() const {
  62.149 -  assert(Bytecodes::java_code(code()) == Bytecodes::_putstatic
  62.150 -      || Bytecodes::java_code(code()) == Bytecodes::_getstatic, "check static");
  62.151 -}
  62.152 -
  62.153 -
  62.154 -BasicType Bytecode_static::result_type(methodOop method) const {
  62.155 -  int index = java_hwrd_at(1);
  62.156 -  constantPoolOop constants = method->constants();
  62.157 -  symbolOop field_type = constants->signature_ref_at(index);
  62.158 -  BasicType basic_type = FieldType::basic_type(field_type);
  62.159 -  return basic_type;
  62.160 +    return invoke->get_index_u2_cpcache(stdc);
  62.161  }
  62.162  
  62.163  
  62.164 @@ -156,7 +206,8 @@
  62.165  
  62.166  
  62.167  int Bytecode_field::index() const {
  62.168 -  return java_hwrd_at(1);
  62.169 +  Bytecode* invoke = Bytecode_at(bcp());
  62.170 +  return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
  62.171  }
  62.172  
  62.173  
  62.174 @@ -164,7 +215,7 @@
  62.175  
  62.176  int Bytecode_loadconstant::index() const {
  62.177    Bytecodes::Code stdc = Bytecodes::java_code(code());
  62.178 -  return stdc == Bytecodes::_ldc ? java_byte_at(1) : java_hwrd_at(1);
  62.179 +  return stdc == Bytecodes::_ldc ? get_index_u1(stdc) : get_index_u2(stdc);
  62.180  }
  62.181  
  62.182  //------------------------------------------------------------------------------
    63.1 --- a/src/share/vm/interpreter/bytecode.hpp	Thu May 20 08:32:11 2010 -0700
    63.2 +++ b/src/share/vm/interpreter/bytecode.hpp	Mon May 24 14:15:14 2010 -0700
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    63.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -26,92 +26,100 @@
   63.11  // relative to an objects 'this' pointer.
   63.12  
   63.13  class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
   63.14 - private:
   63.15 -  int     sign_extend        (int x, int size)   const     { const int s = (BytesPerInt - size)*BitsPerByte; return (x << s) >> s; }
   63.16 -
   63.17   public:
   63.18    // Address computation
   63.19    address addr_at            (int offset)        const     { return (address)this + offset; }
   63.20 +  int     byte_at            (int offset)        const     { return *(addr_at(offset)); }
   63.21    address aligned_addr_at    (int offset)        const     { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
   63.22    int     aligned_offset     (int offset)        const     { return aligned_addr_at(offset) - addr_at(0); }
   63.23  
   63.24 -  // Java unsigned accessors (using Java spec byte ordering)
   63.25 -  int     java_byte_at       (int offset)        const     { return *(jubyte*)addr_at(offset); }
   63.26 -  int     java_hwrd_at       (int offset)        const     { return java_byte_at(offset) << (1 * BitsPerByte) | java_byte_at(offset + 1); }
   63.27 -  int     java_word_at       (int offset)        const     { return java_hwrd_at(offset) << (2 * BitsPerByte) | java_hwrd_at(offset + 2); }
   63.28 -
   63.29 -  // Java signed accessors (using Java spec byte ordering)
   63.30 -  int     java_signed_byte_at(int offset)        const     { return sign_extend(java_byte_at(offset), 1); }
   63.31 -  int     java_signed_hwrd_at(int offset)        const     { return sign_extend(java_hwrd_at(offset), 2); }
   63.32 -  int     java_signed_word_at(int offset)        const     { return             java_word_at(offset)    ; }
   63.33 -
   63.34 -  // Fast accessors (using the machine's natural byte ordering)
   63.35 -  int     fast_byte_at       (int offset)        const     { return *(jubyte *)addr_at(offset); }
   63.36 -  int     fast_hwrd_at       (int offset)        const     { return *(jushort*)addr_at(offset); }
   63.37 -  int     fast_word_at       (int offset)        const     { return *(juint  *)addr_at(offset); }
   63.38 -
   63.39 -  // Fast signed accessors (using the machine's natural byte ordering)
   63.40 -  int     fast_signed_byte_at(int offset)        const     { return *(jbyte *)addr_at(offset); }
   63.41 -  int     fast_signed_hwrd_at(int offset)        const     { return *(jshort*)addr_at(offset); }
   63.42 -  int     fast_signed_word_at(int offset)        const     { return *(jint  *)addr_at(offset); }
   63.43 -
   63.44 -  // Fast manipulators (using the machine's natural byte ordering)
   63.45 -  void    set_fast_byte_at   (int offset, int x) const     { *(jbyte *)addr_at(offset) = (jbyte )x; }
   63.46 -  void    set_fast_hwrd_at   (int offset, int x) const     { *(jshort*)addr_at(offset) = (jshort)x; }
   63.47 -  void    set_fast_word_at   (int offset, int x) const     { *(jint  *)addr_at(offset) = (jint  )x; }
   63.48 +  // Word access:
   63.49 +  int     get_Java_u2_at     (int offset)        const     { return Bytes::get_Java_u2(addr_at(offset)); }
   63.50 +  int     get_Java_u4_at     (int offset)        const     { return Bytes::get_Java_u4(addr_at(offset)); }
   63.51 +  int     get_native_u2_at   (int offset)        const     { return Bytes::get_native_u2(addr_at(offset)); }
   63.52 +  int     get_native_u4_at   (int offset)        const     { return Bytes::get_native_u4(addr_at(offset)); }
   63.53  };
   63.54  
   63.55  
   63.56  // The base class for different kinds of bytecode abstractions.
   63.57  // Provides the primitive operations to manipulate code relative
   63.58  // to an objects 'this' pointer.
   63.59 +// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
   63.60  
   63.61  class Bytecode: public ThisRelativeObj {
   63.62   protected:
   63.63    u_char byte_at(int offset) const               { return *addr_at(offset); }
   63.64 -  bool check_must_rewrite() const;
   63.65 +  bool check_must_rewrite(Bytecodes::Code bc) const;
   63.66  
   63.67   public:
   63.68    // Attributes
   63.69    address bcp() const                            { return addr_at(0); }
   63.70 -  address next_bcp() const                       { return addr_at(0) + Bytecodes::length_at(bcp()); }
   63.71    int instruction_size() const                   { return Bytecodes::length_at(bcp()); }
   63.72  
   63.73 +  // Warning: Use code() with caution on live bytecode streams.  4926272
   63.74    Bytecodes::Code code() const                   { return Bytecodes::code_at(addr_at(0)); }
   63.75    Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
   63.76 -  bool must_rewrite() const                      { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); }
   63.77 -  bool is_active_breakpoint() const              { return Bytecodes::is_active_breakpoint_at(bcp()); }
   63.78 -
   63.79 -  int     one_byte_index() const                 { assert_index_size(1); return byte_at(1); }
   63.80 -  int     two_byte_index() const                 { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
   63.81 -
   63.82 -  int     offset() const                         { return (two_byte_index() << 16) >> 16; }
   63.83 -  address destination() const                    { return bcp() + offset(); }
   63.84 -
   63.85 -  // Attribute modification
   63.86 -  void    set_code(Bytecodes::Code code);
   63.87 +  bool must_rewrite(Bytecodes::Code code) const  { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
   63.88  
   63.89    // Creation
   63.90    inline friend Bytecode* Bytecode_at(address bcp);
   63.91  
   63.92 - private:
   63.93 -  void assert_index_size(int required_size) const {
   63.94 -#ifdef ASSERT
   63.95 -    int isize = instruction_size() - 1;
   63.96 -    if (isize == 2 && code() == Bytecodes::_iinc)
   63.97 -      isize = 1;
   63.98 -    else if (isize <= 2)
   63.99 -      ;                         // no change
  63.100 -    else if (code() == Bytecodes::_invokedynamic)
  63.101 -      isize = 4;
  63.102 -    else
  63.103 -      isize = 2;
  63.104 -    assert(isize = required_size, "wrong index size");
  63.105 -#endif
  63.106 +  // Static functions for parsing bytecodes in place.
  63.107 +  int get_index_u1(Bytecodes::Code bc) const {
  63.108 +    assert_same_format_as(bc); assert_index_size(1, bc);
  63.109 +    return *(jubyte*)addr_at(1);
  63.110 +  }
  63.111 +  int get_index_u2(Bytecodes::Code bc, bool is_wide = false) const {
  63.112 +    assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
  63.113 +    address p = addr_at(is_wide ? 2 : 1);
  63.114 +    if (can_use_native_byte_order(bc, is_wide))
  63.115 +          return Bytes::get_native_u2(p);
  63.116 +    else  return Bytes::get_Java_u2(p);
  63.117 +  }
  63.118 +  int get_index_u2_cpcache(Bytecodes::Code bc) const {
  63.119 +    assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
  63.120 +    return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
  63.121 +  }
  63.122 +  int get_index_u4(Bytecodes::Code bc) const {
  63.123 +    assert_same_format_as(bc); assert_index_size(4, bc);
  63.124 +    assert(can_use_native_byte_order(bc), "");
  63.125 +    return Bytes::get_native_u4(addr_at(1));
  63.126 +  }
  63.127 +  bool has_index_u4(Bytecodes::Code bc) const {
  63.128 +    return bc == Bytecodes::_invokedynamic;
  63.129 +  }
  63.130 +
  63.131 +  int get_offset_s2(Bytecodes::Code bc) const {
  63.132 +    assert_same_format_as(bc); assert_offset_size(2, bc);
  63.133 +    return (jshort) Bytes::get_Java_u2(addr_at(1));
  63.134 +  }
  63.135 +  int get_offset_s4(Bytecodes::Code bc) const {
  63.136 +    assert_same_format_as(bc); assert_offset_size(4, bc);
  63.137 +    return (jint) Bytes::get_Java_u4(addr_at(1));
  63.138 +  }
  63.139 +
  63.140 +  int get_constant_u1(int offset, Bytecodes::Code bc) const {
  63.141 +    assert_same_format_as(bc); assert_constant_size(1, offset, bc);
  63.142 +    return *(jbyte*)addr_at(offset);
  63.143 +  }
  63.144 +  int get_constant_u2(int offset, Bytecodes::Code bc, bool is_wide = false) const {
  63.145 +    assert_same_format_as(bc, is_wide); assert_constant_size(2, offset, bc, is_wide);
  63.146 +    return (jshort) Bytes::get_Java_u2(addr_at(offset));
  63.147 +  }
  63.148 +
  63.149 +  // These are used locally and also from bytecode streams.
  63.150 +  void assert_same_format_as(Bytecodes::Code testbc, bool is_wide = false) const NOT_DEBUG_RETURN;
  63.151 +  static void assert_index_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
  63.152 +  static void assert_offset_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
  63.153 +  static void assert_constant_size(int required_size, int where, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
  63.154 +  static void assert_native_index(Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
  63.155 +  static bool can_use_native_byte_order(Bytecodes::Code bc, bool is_wide = false) {
  63.156 +    return (!Bytes::is_Java_byte_ordering_different() || Bytecodes::native_byte_order(bc /*, is_wide*/));
  63.157    }
  63.158  };
  63.159  
  63.160  inline Bytecode* Bytecode_at(address bcp) {
  63.161 +  // Warning: Use with caution on live bytecode streams.  4926272
  63.162    return (Bytecode*)bcp;
  63.163  }
  63.164  
  63.165 @@ -124,8 +132,8 @@
  63.166    int  _offset;
  63.167  
  63.168   public:
  63.169 -  int  match() const                             { return java_signed_word_at(0 * jintSize); }
  63.170 -  int  offset() const                            { return java_signed_word_at(1 * jintSize); }
  63.171 +  int  match() const                             { return get_Java_u4_at(0 * jintSize); }
  63.172 +  int  offset() const                            { return get_Java_u4_at(1 * jintSize); }
  63.173  };
  63.174  
  63.175  
  63.176 @@ -134,8 +142,8 @@
  63.177    void verify() const PRODUCT_RETURN;
  63.178  
  63.179    // Attributes
  63.180 -  int  default_offset() const                    { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
  63.181 -  int  number_of_pairs() const                   { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
  63.182 +  int  default_offset() const                    { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
  63.183 +  int  number_of_pairs() const                   { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
  63.184    LookupswitchPair* pair_at(int i) const         { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
  63.185                                                     return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
  63.186    // Creation
  63.187 @@ -154,9 +162,9 @@
  63.188    void verify() const PRODUCT_RETURN;
  63.189  
  63.190    // Attributes
  63.191 -  int  default_offset() const                    { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
  63.192 -  int  low_key() const                           { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
  63.193 -  int  high_key() const                          { return java_signed_word_at(aligned_offset(1 + 2*jintSize)); }
  63.194 +  int  default_offset() const                    { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
  63.195 +  int  low_key() const                           { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
  63.196 +  int  high_key() const                          { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
  63.197    int  dest_offset_at(int i) const;
  63.198    int  length()                                  { return high_key()-low_key()+1; }
  63.199  
  63.200 @@ -206,7 +214,6 @@
  63.201    bool is_invokedynamic() const                  { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
  63.202  
  63.203    bool has_receiver() const                      { return !is_invokestatic() && !is_invokedynamic(); }
  63.204 -  bool has_giant_index() const                   { return is_invokedynamic(); }
  63.205  
  63.206    bool is_valid() const                          { return is_invokeinterface() ||
  63.207                                                            is_invokevirtual()   ||
  63.208 @@ -252,26 +259,6 @@
  63.209  }
  63.210  
  63.211  
  63.212 -// Abstraction for {get,put}static
  63.213 -
  63.214 -class Bytecode_static: public Bytecode {
  63.215 - public:
  63.216 -  void verify() const;
  63.217 -
  63.218 -  // Returns the result type of the send by inspecting the field ref
  63.219 -  BasicType result_type(methodOop method) const;
  63.220 -
  63.221 -  // Creation
  63.222 -  inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
  63.223 -};
  63.224 -
  63.225 -inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
  63.226 -  Bytecode_static* b = (Bytecode_static*)bcp;
  63.227 -  debug_only(b->verify());
  63.228 -  return b;
  63.229 -}
  63.230 -
  63.231 -
  63.232  // Abstraction for checkcast
  63.233  
  63.234  class Bytecode_checkcast: public Bytecode {
  63.235 @@ -279,7 +266,7 @@
  63.236    void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
  63.237  
  63.238    // Returns index
  63.239 -  long index() const   { return java_hwrd_at(1); };
  63.240 +  long index() const   { return get_index_u2(Bytecodes::_checkcast); };
  63.241  
  63.242    // Creation
  63.243    inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
  63.244 @@ -299,7 +286,7 @@
  63.245    void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
  63.246  
  63.247    // Returns index
  63.248 -  long index() const   { return get_Java_u2_at(1); };
  63.249 +  long index() const   { return get_index_u2(Bytecodes::_instanceof); };
  63.250  
  63.251    // Creation
  63.252    inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
  63.253 @@ -317,7 +304,7 @@
  63.254    void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
  63.255  
  63.256    // Returns index
  63.257 -  long index() const   { return get_Java_u2_at(1); };
  63.258 +  long index() const   { return get_index_u2(Bytecodes::_new); };
  63.259  
  63.260    // Creation
  63.261    inline friend Bytecode_new* Bytecode_new_at(address bcp);
  63.262 @@ -335,7 +322,7 @@
  63.263    void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
  63.264  
  63.265    // Returns index
  63.266 -  long index() const   { return get_Java_u2_at(1); };
  63.267 +  long index() const   { return get_index_u2(Bytecodes::_multianewarray); };
  63.268  
  63.269    // Creation
  63.270    inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
  63.271 @@ -353,7 +340,7 @@
  63.272    void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
  63.273  
  63.274    // Returns index
  63.275 -  long index() const   { return get_Java_u2_at(1); };
  63.276 +  long index() const   { return get_index_u2(Bytecodes::_anewarray); };
  63.277  
  63.278    // Creation
  63.279    inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
    64.1 --- a/src/share/vm/interpreter/bytecodeStream.cpp	Thu May 20 08:32:11 2010 -0700
    64.2 +++ b/src/share/vm/interpreter/bytecodeStream.cpp	Mon May 24 14:15:14 2010 -0700
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
    64.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -48,6 +48,25 @@
   64.11        }
   64.12      }
   64.13    }
   64.14 -  _code = code;
   64.15 +  _raw_code = code;
   64.16    return code;
   64.17  }
   64.18 +
   64.19 +#ifdef ASSERT
   64.20 +void BaseBytecodeStream::assert_raw_index_size(int size) const {
   64.21 +  if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
   64.22 +    // in raw mode, pretend indy is "bJJ__"
   64.23 +    assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
   64.24 +  } else {
   64.25 +    bytecode()->assert_index_size(size, raw_code(), is_wide());
   64.26 +  }
   64.27 +}
   64.28 +
   64.29 +void BaseBytecodeStream::assert_raw_stream(bool want_raw) const {
   64.30 +  if (want_raw) {
   64.31 +    assert( is_raw(), "this function only works on raw streams");
   64.32 +  } else {
   64.33 +    assert(!is_raw(), "this function only works on non-raw streams");
   64.34 +  }
   64.35 +}
   64.36 +#endif //ASSERT
    65.1 --- a/src/share/vm/interpreter/bytecodeStream.hpp	Thu May 20 08:32:11 2010 -0700
    65.2 +++ b/src/share/vm/interpreter/bytecodeStream.hpp	Mon May 24 14:15:14 2010 -0700
    65.3 @@ -1,5 +1,5 @@
    65.4  /*
    65.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    65.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    65.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.8   *
    65.9   * This code is free software; you can redistribute it and/or modify it
   65.10 @@ -32,13 +32,13 @@
   65.11  // while ((c = s.next()) >= 0) {
   65.12  //   ...
   65.13  // }
   65.14 -//
   65.15 +
   65.16  // A RawBytecodeStream is a simple version of BytecodeStream.
   65.17  // It is used ONLY when we know the bytecodes haven't been rewritten
   65.18 -// yet, such as in the rewriter or the verifier. Currently only the
   65.19 -// verifier uses this class.
   65.20 +// yet, such as in the rewriter or the verifier.
   65.21  
   65.22 -class RawBytecodeStream: StackObj {
   65.23 +// Here is the common base class for both RawBytecodeStream and BytecodeStream:
   65.24 +class BaseBytecodeStream: StackObj {
   65.25   protected:
   65.26    // stream buffer
   65.27    methodHandle    _method;                       // read from method directly
   65.28 @@ -49,15 +49,17 @@
   65.29    int             _end_bci;                      // bci after the current iteration interval
   65.30  
   65.31    // last bytecode read
   65.32 -  Bytecodes::Code _code;
   65.33 +  Bytecodes::Code _raw_code;
   65.34    bool            _is_wide;
   65.35 +  bool            _is_raw;                       // false in 'cooked' BytecodeStream
   65.36 +
   65.37 +  // Construction
   65.38 +  BaseBytecodeStream(methodHandle method) : _method(method) {
   65.39 +    set_interval(0, _method->code_size());
   65.40 +    _is_raw = false;
   65.41 +  }
   65.42  
   65.43   public:
   65.44 -  // Construction
   65.45 -  RawBytecodeStream(methodHandle method) : _method(method) {
   65.46 -    set_interval(0, _method->code_size());
   65.47 -  }
   65.48 -
   65.49    // Iteration control
   65.50    void set_interval(int beg_bci, int end_bci) {
   65.51      // iterate over the interval [beg_bci, end_bci)
   65.52 @@ -72,6 +74,46 @@
   65.53      set_interval(beg_bci, _method->code_size());
   65.54    }
   65.55  
   65.56 +  bool is_raw() const { return _is_raw; }
   65.57 +
   65.58 +  // Stream attributes
   65.59 +  methodHandle    method() const                 { return _method; }
   65.60 +
   65.61 +  int             bci() const                    { return _bci; }
   65.62 +  int             next_bci() const               { return _next_bci; }
   65.63 +  int             end_bci() const                { return _end_bci; }
   65.64 +
   65.65 +  Bytecodes::Code raw_code() const               { return _raw_code; }
   65.66 +  bool            is_wide() const                { return _is_wide; }
   65.67 +  int             instruction_size() const       { return (_next_bci - _bci); }
   65.68 +  bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }
   65.69 +
   65.70 +  address         bcp() const                    { return method()->code_base() + _bci; }
   65.71 +  Bytecode*       bytecode() const               { return Bytecode_at(bcp()); }
   65.72 +
   65.73 +  // State changes
   65.74 +  void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
   65.75 +
   65.76 +  // Bytecode-specific attributes
   65.77 +  int             dest() const                   { return bci() + bytecode()->get_offset_s2(raw_code()); }
   65.78 +  int             dest_w() const                 { return bci() + bytecode()->get_offset_s4(raw_code()); }
   65.79 +
   65.80 +  // One-byte indices.
   65.81 +  int             get_index_u1() const           { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
   65.82 +
   65.83 + protected:
   65.84 +  void assert_raw_index_size(int size) const NOT_DEBUG_RETURN;
   65.85 +  void assert_raw_stream(bool want_raw) const NOT_DEBUG_RETURN;
   65.86 +};
   65.87 +
   65.88 +class RawBytecodeStream: public BaseBytecodeStream {
   65.89 + public:
   65.90 +  // Construction
   65.91 +  RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
   65.92 +    _is_raw = true;
   65.93 +  }
   65.94 +
   65.95 + public:
   65.96    // Iteration
   65.97    // Use raw_next() rather than next() for faster method reference
   65.98    Bytecodes::Code raw_next() {
   65.99 @@ -80,7 +122,7 @@
  65.100      _bci = _next_bci;
  65.101      assert(!is_last_bytecode(), "caller should check is_last_bytecode()");
  65.102  
  65.103 -    address bcp = RawBytecodeStream::bcp();
  65.104 +    address bcp = this->bcp();
  65.105      code        = Bytecodes::code_or_bp_at(bcp);
  65.106  
  65.107      // set next bytecode position
  65.108 @@ -90,84 +132,49 @@
  65.109               && code != Bytecodes::_lookupswitch, "can't be special bytecode");
  65.110        _is_wide = false;
  65.111        _next_bci += l;
  65.112 -      _code = code;
  65.113 +      _raw_code = code;
  65.114        return code;
  65.115 -    } else if (code == Bytecodes::_wide && _bci + 1 >= _end_bci) {
  65.116 -      return Bytecodes::_illegal;
  65.117      } else {
  65.118        return raw_next_special(code);
  65.119      }
  65.120    }
  65.121    Bytecodes::Code raw_next_special(Bytecodes::Code code);
  65.122  
  65.123 -  // Stream attributes
  65.124 -  methodHandle    method() const                 { return _method; }
  65.125 -
  65.126 -  int             bci() const                    { return _bci; }
  65.127 -  int             next_bci() const               { return _next_bci; }
  65.128 -  int             end_bci() const                { return _end_bci; }
  65.129 -
  65.130 -  Bytecodes::Code code() const                   { return _code; }
  65.131 -  bool            is_wide() const                { return _is_wide; }
  65.132 -  int             instruction_size() const       { return (_next_bci - _bci); }
  65.133 -  bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }
  65.134 -
  65.135 -  address         bcp() const                    { return method()->code_base() + _bci; }
  65.136 -  address         next_bcp()                     { return method()->code_base() + _next_bci; }
  65.137 -
  65.138 -  // State changes
  65.139 -  void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
  65.140 -
  65.141 -  // Bytecode-specific attributes
  65.142 -  int             dest() const                   { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
  65.143 -  int             dest_w() const                 { return bci() + (int  )Bytes::get_Java_u4(bcp() + 1); }
  65.144 -
  65.145 -  // Unsigned indices, widening
  65.146 -  int             get_index() const              { assert_index_size(is_wide() ? 2 : 1);
  65.147 -                                                   return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
  65.148 -  int             get_index_big() const          { assert_index_size(2);
  65.149 -                                                   return (int)Bytes::get_Java_u2(bcp() + 1);  }
  65.150 -  int             get_index_int() const          { return has_giant_index() ? get_index_giant() : get_index_big(); }
  65.151 -  int             get_index_giant() const        { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
  65.152 -  int             has_giant_index() const        { return (code() == Bytecodes::_invokedynamic); }
  65.153 +  // Unsigned indices, widening, with no swapping of bytes
  65.154 +  int             get_index() const          { return (is_wide()) ? get_index_u2_raw(bcp() + 2) : get_index_u1(); }
  65.155 +  // Get an unsigned 2-byte index, with no swapping of bytes.
  65.156 +  int             get_index_u2() const       { assert(!is_wide(), ""); return get_index_u2_raw(bcp() + 1);  }
  65.157  
  65.158   private:
  65.159 -  void assert_index_size(int required_size) const {
  65.160 -#ifdef ASSERT
  65.161 -    int isize = instruction_size() - (int)_is_wide - 1;
  65.162 -    if (isize == 2 && code() == Bytecodes::_iinc)
  65.163 -      isize = 1;
  65.164 -    else if (isize <= 2)
  65.165 -      ;                         // no change
  65.166 -    else if (has_giant_index())
  65.167 -      isize = 4;
  65.168 -    else
  65.169 -      isize = 2;
   65.170 -    assert(isize == required_size, "wrong index size");
  65.171 -#endif
  65.172 +  int get_index_u2_raw(address p) const {
  65.173 +    assert_raw_index_size(2); assert_raw_stream(true);
  65.174 +    return Bytes::get_Java_u2(p);
  65.175    }
  65.176  };
  65.177  
  65.178  // In BytecodeStream, non-java bytecodes will be translated into the
  65.179  // corresponding java bytecodes.
  65.180  
  65.181 -class BytecodeStream: public RawBytecodeStream {
  65.182 +class BytecodeStream: public BaseBytecodeStream {
  65.183 +  Bytecodes::Code _code;
  65.184 +
  65.185   public:
  65.186    // Construction
  65.187 -  BytecodeStream(methodHandle method) : RawBytecodeStream(method) { }
  65.188 +  BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
  65.189  
  65.190    // Iteration
  65.191    Bytecodes::Code next() {
  65.192 -    Bytecodes::Code code;
  65.193 +    Bytecodes::Code raw_code, code;
  65.194      // set reading position
  65.195      _bci = _next_bci;
  65.196      if (is_last_bytecode()) {
  65.197        // indicate end of bytecode stream
  65.198 -      code = Bytecodes::_illegal;
  65.199 +      raw_code = code = Bytecodes::_illegal;
  65.200      } else {
  65.201        // get bytecode
  65.202 -      address bcp = BytecodeStream::bcp();
  65.203 -      code        = Bytecodes::java_code_at(bcp);
  65.204 +      address bcp = this->bcp();
  65.205 +      raw_code = Bytecodes::code_at(bcp);
  65.206 +      code = Bytecodes::java_code(raw_code);
  65.207        // set next bytecode position
  65.208        //
  65.209        // note that we cannot advance before having the
  65.210 @@ -181,14 +188,29 @@
  65.211        _is_wide      = false;
  65.212        // check for special (uncommon) cases
  65.213        if (code == Bytecodes::_wide) {
  65.214 -        code = (Bytecodes::Code)bcp[1];
  65.215 +        raw_code = (Bytecodes::Code)bcp[1];
  65.216 +        code = raw_code;  // wide BCs are always Java-normal
  65.217          _is_wide = true;
  65.218        }
  65.219        assert(Bytecodes::is_java_code(code), "sanity check");
  65.220      }
  65.221 +    _raw_code = raw_code;
  65.222      _code = code;
  65.223      return _code;
  65.224    }
  65.225  
  65.226    bool            is_active_breakpoint() const   { return Bytecodes::is_active_breakpoint_at(bcp()); }
  65.227 +  Bytecodes::Code code() const                   { return _code; }
  65.228 +
  65.229 +  // Unsigned indices, widening
  65.230 +  int             get_index() const              { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
  65.231 +  // Get an unsigned 2-byte index, swapping the bytes if necessary.
  65.232 +  int             get_index_u2() const           { assert_raw_stream(false);
  65.233 +                                                   return bytecode()->get_index_u2(raw_code(), false); }
  65.234 +  // Get an unsigned 2-byte index in native order.
  65.235 +  int             get_index_u2_cpcache() const   { assert_raw_stream(false);
  65.236 +                                                   return bytecode()->get_index_u2_cpcache(raw_code()); }
  65.237 +  int             get_index_u4() const           { assert_raw_stream(false);
  65.238 +                                                   return bytecode()->get_index_u4(raw_code()); }
   65.239 +  int             has_index_u4() const           { return bytecode()->has_index_u4(raw_code()); }
  65.240  };
    66.1 --- a/src/share/vm/interpreter/bytecodeTracer.cpp	Thu May 20 08:32:11 2010 -0700
    66.2 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Mon May 24 14:15:14 2010 -0700
    66.3 @@ -1,5 +1,5 @@
    66.4  /*
    66.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    66.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    66.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.8   *
    66.9   * This code is free software; you can redistribute it and/or modify it
   66.10 @@ -39,6 +39,7 @@
   66.11    // (Also, ensure that occasional false positives are benign.)
   66.12    methodOop _current_method;
   66.13    bool      _is_wide;
   66.14 +  Bytecodes::Code _code;
   66.15    address   _next_pc;                // current decoding position
   66.16  
   66.17    void      align()                  { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
   66.18 @@ -46,23 +47,26 @@
   66.19    short     get_short()              { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
   66.20    int       get_int()                { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
   66.21  
   66.22 -  int       get_index()              { return *(address)_next_pc++; }
   66.23 -  int       get_big_index()          { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
   66.24 -  int       get_giant_index()        { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
   66.25 -  int       get_index_special()      { return (is_wide()) ? get_big_index() : get_index(); }
   66.26 +  int       get_index_u1()           { return *(address)_next_pc++; }
   66.27 +  int       get_index_u2()           { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
   66.28 +  int       get_index_u2_cpcache()   { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
   66.29 +  int       get_index_u4()           { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
   66.30 +  int       get_index_special()      { return (is_wide()) ? get_index_u2() : get_index_u1(); }
   66.31    methodOop method()                 { return _current_method; }
   66.32    bool      is_wide()                { return _is_wide; }
   66.33 +  Bytecodes::Code raw_code()         { return Bytecodes::Code(_code); }
   66.34  
   66.35  
   66.36 -  bool      check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty);
   66.37 +  bool      check_index(int i, int& cp_index, outputStream* st = tty);
   66.38    void      print_constant(int i, outputStream* st = tty);
   66.39    void      print_field_or_method(int i, outputStream* st = tty);
   66.40 -  void      print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
   66.41 +  void      print_attributes(int bci, outputStream* st = tty);
   66.42    void      bytecode_epilog(int bci, outputStream* st = tty);
   66.43  
   66.44   public:
   66.45    BytecodePrinter() {
   66.46      _is_wide = false;
   66.47 +    _code = Bytecodes::_illegal;
   66.48    }
   66.49  
   66.50    // This method is called while executing the raw bytecodes, so none of
   66.51 @@ -89,7 +93,8 @@
   66.52      } else {
   66.53        code = Bytecodes::code_at(bcp);
   66.54      }
   66.55 -    int bci = bcp - method->code_base();
   66.56 +    _code = code;
   66.57 +     int bci = bcp - method->code_base();
   66.58      st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
   66.59      if (Verbose) {
   66.60        st->print("%8d  %4d  " INTPTR_FORMAT " " INTPTR_FORMAT " %s",
   66.61 @@ -99,10 +104,11 @@
   66.62             BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
   66.63      }
   66.64      _next_pc = is_wide() ? bcp+2 : bcp+1;
   66.65 -    print_attributes(code, bci);
   66.66 +    print_attributes(bci);
   66.67      // Set is_wide for the next one, since the caller of this doesn't skip
   66.68      // the next bytecode.
   66.69      _is_wide = (code == Bytecodes::_wide);
   66.70 +    _code = Bytecodes::_illegal;
   66.71    }
   66.72  
   66.73    // Used for methodOop::print_codes().  The input bcp comes from
   66.74 @@ -116,6 +122,7 @@
   66.75      if (is_wide()) {
   66.76        code = Bytecodes::code_at(bcp+1);
   66.77      }
   66.78 +    _code = code;
   66.79      int bci = bcp - method->code_base();
   66.80      // Print bytecode index and name
   66.81      if (is_wide()) {
   66.82 @@ -124,7 +131,7 @@
   66.83        st->print("%d %s", bci, Bytecodes::name(code));
   66.84      }
   66.85      _next_pc = is_wide() ? bcp+2 : bcp+1;
   66.86 -    print_attributes(code, bci, st);
   66.87 +    print_attributes(bci, st);
   66.88      bytecode_epilog(bci, st);
   66.89    }
   66.90  };
   66.91 @@ -185,12 +192,13 @@
   66.92    }
   66.93  }
   66.94  
   66.95 -bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) {
   66.96 +bool BytecodePrinter::check_index(int i, int& cp_index, outputStream* st) {
   66.97    constantPoolOop constants = method()->constants();
   66.98    int ilimit = constants->length(), climit = 0;
   66.99 +  Bytecodes::Code code = raw_code();
  66.100  
  66.101    constantPoolCacheOop cache = NULL;
  66.102 -  if (in_cp_cache) {
  66.103 +  if (Bytecodes::uses_cp_cache(code)) {
  66.104      cache = constants->cache();
  66.105      if (cache != NULL) {
  66.106        //climit = cache->length();  // %%% private!
  66.107 @@ -201,7 +209,7 @@
  66.108      }
  66.109    }
  66.110  
  66.111 -  if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) {
  66.112 +  if (cache != NULL && constantPoolCacheOopDesc::is_secondary_index(i)) {
  66.113      i = constantPoolCacheOopDesc::decode_secondary_index(i);
  66.114      st->print(" secondary cache[%d] of", i);
  66.115      if (i >= 0 && i < climit) {
  66.116 @@ -218,8 +226,6 @@
  66.117    }
  66.118  
  66.119    if (cache != NULL) {
  66.120 -    i = Bytes::swap_u2(i);
  66.121 -    if (WizardMode)  st->print(" (swap=%d)", i);
  66.122      goto check_cache_index;
  66.123    }
  66.124  
  66.125 @@ -234,6 +240,17 @@
  66.126    return false;
  66.127  
  66.128   check_cache_index:
  66.129 +#ifdef ASSERT
  66.130 +  {
  66.131 +    const int CPCACHE_INDEX_TAG = constantPoolOopDesc::CPCACHE_INDEX_TAG;
  66.132 +    if (i >= CPCACHE_INDEX_TAG && i < climit + CPCACHE_INDEX_TAG) {
  66.133 +      i -= CPCACHE_INDEX_TAG;
  66.134 +    } else {
  66.135 +      st->print_cr(" CP[%d] missing bias?", i);
  66.136 +      return false;
  66.137 +    }
  66.138 +  }
  66.139 +#endif //ASSERT
  66.140    if (i >= 0 && i < climit) {
  66.141      if (cache->entry_at(i)->is_secondary_entry()) {
  66.142        st->print_cr(" secondary entry?");
  66.143 @@ -248,7 +265,7 @@
  66.144  
  66.145  void BytecodePrinter::print_constant(int i, outputStream* st) {
  66.146    int orig_i = i;
  66.147 -  if (!check_index(orig_i, false, i, st))  return;
  66.148 +  if (!check_index(orig_i, i, st))  return;
  66.149  
  66.150    constantPoolOop constants = method()->constants();
  66.151    constantTag tag = constants->tag_at(i);
  66.152 @@ -279,7 +296,7 @@
  66.153  
  66.154  void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
  66.155    int orig_i = i;
  66.156 -  if (!check_index(orig_i, true, i, st))  return;
  66.157 +  if (!check_index(orig_i, i, st))  return;
  66.158  
  66.159    constantPoolOop constants = method()->constants();
  66.160    constantTag tag = constants->tag_at(i);
  66.161 @@ -303,9 +320,9 @@
  66.162  }
  66.163  
  66.164  
  66.165 -void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) {
  66.166 +void BytecodePrinter::print_attributes(int bci, outputStream* st) {
  66.167    // Show attributes of pre-rewritten codes
  66.168 -  code = Bytecodes::java_code(code);
  66.169 +  Bytecodes::Code code = Bytecodes::java_code(raw_code());
  66.170    // If the code doesn't have any fields there's nothing to print.
  66.171    // note this is ==1 because the tableswitch and lookupswitch are
  66.172    // zero size (for some reason) and we want to print stuff out for them.
  66.173 @@ -323,12 +340,12 @@
  66.174        st->print_cr(" " INT32_FORMAT, get_short());
  66.175        break;
  66.176      case Bytecodes::_ldc:
  66.177 -      print_constant(get_index(), st);
  66.178 +      print_constant(get_index_u1(), st);
  66.179        break;
  66.180  
  66.181      case Bytecodes::_ldc_w:
  66.182      case Bytecodes::_ldc2_w:
  66.183 -      print_constant(get_big_index(), st);
  66.184 +      print_constant(get_index_u2(), st);
  66.185        break;
  66.186  
  66.187      case Bytecodes::_iload:
  66.188 @@ -352,7 +369,7 @@
  66.189        break;
  66.190  
  66.191      case Bytecodes::_newarray: {
  66.192 -        BasicType atype = (BasicType)get_index();
  66.193 +        BasicType atype = (BasicType)get_index_u1();
  66.194          const char* str = type2name(atype);
  66.195          if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
  66.196            assert(false, "Unidentified basic type");
  66.197 @@ -361,15 +378,15 @@
  66.198        }
  66.199        break;
  66.200      case Bytecodes::_anewarray: {
  66.201 -        int klass_index = get_big_index();
  66.202 +        int klass_index = get_index_u2();
  66.203          constantPoolOop constants = method()->constants();
  66.204          symbolOop name = constants->klass_name_at(klass_index);
  66.205          st->print_cr(" %s ", name->as_C_string());
  66.206        }
  66.207        break;
  66.208      case Bytecodes::_multianewarray: {
  66.209 -        int klass_index = get_big_index();
  66.210 -        int nof_dims = get_index();
  66.211 +        int klass_index = get_index_u2();
  66.212 +        int nof_dims = get_index_u1();
  66.213          constantPoolOop constants = method()->constants();
  66.214          symbolOop name = constants->klass_name_at(klass_index);
  66.215          st->print_cr(" %s %d", name->as_C_string(), nof_dims);
  66.216 @@ -451,31 +468,31 @@
  66.217      case Bytecodes::_getstatic:
  66.218      case Bytecodes::_putfield:
  66.219      case Bytecodes::_getfield:
  66.220 -      print_field_or_method(get_big_index(), st);
  66.221 +      print_field_or_method(get_index_u2_cpcache(), st);
  66.222        break;
  66.223  
  66.224      case Bytecodes::_invokevirtual:
  66.225      case Bytecodes::_invokespecial:
  66.226      case Bytecodes::_invokestatic:
  66.227 -      print_field_or_method(get_big_index(), st);
  66.228 +      print_field_or_method(get_index_u2_cpcache(), st);
  66.229        break;
  66.230  
  66.231      case Bytecodes::_invokeinterface:
  66.232 -      { int i = get_big_index();
  66.233 -        int n = get_index();
  66.234 -        get_index();            // ignore zero byte
  66.235 +      { int i = get_index_u2_cpcache();
  66.236 +        int n = get_index_u1();
  66.237 +        get_byte();            // ignore zero byte
  66.238          print_field_or_method(i, st);
  66.239        }
  66.240        break;
  66.241  
  66.242      case Bytecodes::_invokedynamic:
  66.243 -      print_field_or_method(get_giant_index(), st);
  66.244 +      print_field_or_method(get_index_u4(), st);
  66.245        break;
  66.246  
  66.247      case Bytecodes::_new:
  66.248      case Bytecodes::_checkcast:
  66.249      case Bytecodes::_instanceof:
  66.250 -      { int i = get_big_index();
  66.251 +      { int i = get_index_u2();
  66.252          constantPoolOop constants = method()->constants();
  66.253          symbolOop name = constants->klass_name_at(i);
  66.254          st->print_cr(" %d <%s>", i, name->as_C_string());
    67.1 --- a/src/share/vm/interpreter/bytecodes.cpp	Thu May 20 08:32:11 2010 -0700
    67.2 +++ b/src/share/vm/interpreter/bytecodes.cpp	Mon May 24 14:15:14 2010 -0700
    67.3 @@ -1,5 +1,5 @@
    67.4  /*
    67.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    67.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    67.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    67.8   *
    67.9   * This code is free software; you can redistribute it and/or modify it
   67.10 @@ -35,14 +35,11 @@
   67.11  
   67.12  bool            Bytecodes::_is_initialized = false;
   67.13  const char*     Bytecodes::_name          [Bytecodes::number_of_codes];
   67.14 -const char*     Bytecodes::_format        [Bytecodes::number_of_codes];
   67.15 -const char*     Bytecodes::_wide_format   [Bytecodes::number_of_codes];
   67.16  BasicType       Bytecodes::_result_type   [Bytecodes::number_of_codes];
   67.17  s_char          Bytecodes::_depth         [Bytecodes::number_of_codes];
   67.18 -u_char          Bytecodes::_length        [Bytecodes::number_of_codes];
   67.19 -bool            Bytecodes::_can_trap      [Bytecodes::number_of_codes];
   67.20 +u_char          Bytecodes::_lengths       [Bytecodes::number_of_codes];
   67.21  Bytecodes::Code Bytecodes::_java_code     [Bytecodes::number_of_codes];
   67.22 -bool            Bytecodes::_can_rewrite   [Bytecodes::number_of_codes];
   67.23 +u_short         Bytecodes::_flags         [(1<<BitsPerByte)*2];
   67.24  
   67.25  
   67.26  Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
   67.27 @@ -122,15 +119,22 @@
   67.28  
   67.29  void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) {
   67.30    assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form");
   67.31 +  int len  = (format      != NULL ? (int) strlen(format)      : 0);
   67.32 +  int wlen = (wide_format != NULL ? (int) strlen(wide_format) : 0);
   67.33    _name          [code] = name;
   67.34 -  _format        [code] = format;
   67.35 -  _wide_format   [code] = wide_format;
   67.36    _result_type   [code] = result_type;
   67.37    _depth         [code] = depth;
   67.38 -  _can_trap      [code] = can_trap;
   67.39 -  _length        [code] = format != NULL ? (u_char)strlen(format) : 0;
   67.40 +  _lengths       [code] = (wlen << 4) | (len & 0xF);
   67.41    _java_code     [code] = java_code;
   67.42 -  if (java_code != code)  _can_rewrite[java_code] = true;
   67.43 +  int bc_flags = 0;
   67.44 +  if (can_trap)           bc_flags |= _bc_can_trap;
   67.45 +  if (java_code != code)  bc_flags |= _bc_can_rewrite;
   67.46 +  _flags[(u1)code+0*(1<<BitsPerByte)] = compute_flags(format,      bc_flags);
   67.47 +  _flags[(u1)code+1*(1<<BitsPerByte)] = compute_flags(wide_format, bc_flags);
   67.48 +  assert(is_defined(code)      == (format != NULL),      "");
   67.49 +  assert(wide_is_defined(code) == (wide_format != NULL), "");
   67.50 +  assert(length_for(code)      == len, "");
   67.51 +  assert(wide_length_for(code) == wlen, "");
   67.52  }
   67.53  
   67.54  
   67.55 @@ -138,23 +142,92 @@
   67.56  //
   67.57  // b: bytecode
   67.58  // c: signed constant, Java byte-ordering
   67.59 -// i: unsigned index , Java byte-ordering
   67.60 -// j: unsigned index , native byte-ordering
   67.61 -// o: branch offset  , Java byte-ordering
   67.62 +// i: unsigned local index, Java byte-ordering (I = native byte ordering)
   67.63 +// j: unsigned CP cache index, Java byte-ordering (J = native byte ordering)
   67.64 +// k: unsigned CP index, Java byte-ordering
   67.65 +// o: branch offset, Java byte-ordering
   67.66  // _: unused/ignored
   67.67  // w: wide bytecode
   67.68  //
   67.69 -// Note: Right now the format strings are used for 2 purposes:
   67.70 +// Note: The format strings are used for 2 purposes:
   67.71  //       1. to specify the length of the bytecode
   67.72  //          (= number of characters in format string)
   67.73 -//       2. to specify the bytecode attributes
   67.74 -//
   67.75 -//       The bytecode attributes are currently used only for bytecode tracing
   67.76 -//       (see BytecodeTracer); thus if more specific format information is
   67.77 -//       used, one would also have to adjust the bytecode tracer.
   67.78 +//       2. to derive bytecode format flags (_fmt_has_k, etc.)
   67.79  //
   67.80  // Note: For bytecodes with variable length, the format string is the empty string.
   67.81  
   67.82 +int Bytecodes::compute_flags(const char* format, int more_flags) {
   67.83 +  if (format == NULL)  return 0;  // not even more_flags
   67.84 +  int flags = more_flags;
   67.85 +  const char* fp = format;
   67.86 +  switch (*fp) {
   67.87 +  case '\0':
   67.88 +    flags |= _fmt_not_simple; // but variable
   67.89 +    break;
   67.90 +  case 'b':
   67.91 +    flags |= _fmt_not_variable;  // but simple
   67.92 +    ++fp;  // skip 'b'
   67.93 +    break;
   67.94 +  case 'w':
   67.95 +    flags |= _fmt_not_variable | _fmt_not_simple;
   67.96 +    ++fp;  // skip 'w'
   67.97 +    guarantee(*fp == 'b', "wide format must start with 'wb'");
   67.98 +    ++fp;  // skip 'b'
   67.99 +    break;
  67.100 +  }
  67.101 +
  67.102 +  int has_nbo = 0, has_jbo = 0, has_size = 0;
  67.103 +  for (;;) {
  67.104 +    int this_flag = 0;
  67.105 +    char fc = *fp++;
  67.106 +    switch (fc) {
  67.107 +    case '\0':  // end of string
  67.108 +      assert(flags == (jchar)flags, "change _format_flags");
  67.109 +      return flags;
  67.110 +
  67.111 +    case '_': continue;         // ignore these
  67.112 +
  67.113 +    case 'j': this_flag = _fmt_has_j; has_jbo = 1; break;
  67.114 +    case 'k': this_flag = _fmt_has_k; has_jbo = 1; break;
  67.115 +    case 'i': this_flag = _fmt_has_i; has_jbo = 1; break;
  67.116 +    case 'c': this_flag = _fmt_has_c; has_jbo = 1; break;
  67.117 +    case 'o': this_flag = _fmt_has_o; has_jbo = 1; break;
  67.118 +
  67.119 +    // uppercase versions mark native byte order (from Rewriter)
  67.120 +    // actually, only the 'J' case happens currently
  67.121 +    case 'J': this_flag = _fmt_has_j; has_nbo = 1; break;
  67.122 +    case 'K': this_flag = _fmt_has_k; has_nbo = 1; break;
  67.123 +    case 'I': this_flag = _fmt_has_i; has_nbo = 1; break;
  67.124 +    case 'C': this_flag = _fmt_has_c; has_nbo = 1; break;
  67.125 +    case 'O': this_flag = _fmt_has_o; has_nbo = 1; break;
  67.126 +    default:  guarantee(false, "bad char in format");
  67.127 +    }
  67.128 +
  67.129 +    flags |= this_flag;
  67.130 +
  67.131 +    guarantee(!(has_jbo && has_nbo), "mixed byte orders in format");
  67.132 +    if (has_nbo)
  67.133 +      flags |= _fmt_has_nbo;
  67.134 +
  67.135 +    int this_size = 1;
  67.136 +    if (*fp == fc) {
  67.137 +      // advance beyond run of the same characters
  67.138 +      this_size = 2;
  67.139 +      while (*++fp == fc)  this_size++;
  67.140 +      switch (this_size) {
  67.141 +      case 2: flags |= _fmt_has_u2; break;
  67.142 +      case 4: flags |= _fmt_has_u4; break;
  67.143 +      default: guarantee(false, "bad rep count in format");
  67.144 +      }
  67.145 +    }
  67.146 +    guarantee(has_size == 0 ||                     // no field yet
  67.147 +              this_size == has_size ||             // same size
  67.148 +              this_size < has_size && *fp == '\0', // last field can be short
  67.149 +              "mixed field sizes in format");
  67.150 +    has_size = this_size;
  67.151 +  }
  67.152 +}
  67.153 +
  67.154  void Bytecodes::initialize() {
  67.155    if (_is_initialized) return;
  67.156    assert(number_of_codes <= 256, "too many bytecodes");
  67.157 @@ -189,9 +262,9 @@
  67.158    def(_dconst_1            , "dconst_1"            , "b"    , NULL    , T_DOUBLE ,  2, false);
  67.159    def(_bipush              , "bipush"              , "bc"   , NULL    , T_INT    ,  1, false);
  67.160    def(_sipush              , "sipush"              , "bcc"  , NULL    , T_INT    ,  1, false);
  67.161 -  def(_ldc                 , "ldc"                 , "bi"   , NULL    , T_ILLEGAL,  1, true );
  67.162 -  def(_ldc_w               , "ldc_w"               , "bii"  , NULL    , T_ILLEGAL,  1, true );
  67.163 -  def(_ldc2_w              , "ldc2_w"              , "bii"  , NULL    , T_ILLEGAL,  2, true );
  67.164 +  def(_ldc                 , "ldc"                 , "bk"   , NULL    , T_ILLEGAL,  1, true );
  67.165 +  def(_ldc_w               , "ldc_w"               , "bkk"  , NULL    , T_ILLEGAL,  1, true );
  67.166 +  def(_ldc2_w              , "ldc2_w"              , "bkk"  , NULL    , T_ILLEGAL,  2, true );
  67.167    def(_iload               , "iload"               , "bi"   , "wbii"  , T_INT    ,  1, false);
  67.168    def(_lload               , "lload"               , "bi"   , "wbii"  , T_LONG   ,  2, false);
  67.169    def(_fload               , "fload"               , "bi"   , "wbii"  , T_FLOAT  ,  1, false);
  67.170 @@ -349,26 +422,26 @@
  67.171    def(_dreturn             , "dreturn"             , "b"    , NULL    , T_DOUBLE , -2, true);
  67.172    def(_areturn             , "areturn"             , "b"    , NULL    , T_OBJECT , -1, true);
  67.173    def(_return              , "return"              , "b"    , NULL    , T_VOID   ,  0, true);
  67.174 -  def(_getstatic           , "getstatic"           , "bjj"  , NULL    , T_ILLEGAL,  1, true );
  67.175 -  def(_putstatic           , "putstatic"           , "bjj"  , NULL    , T_ILLEGAL, -1, true );
  67.176 -  def(_getfield            , "getfield"            , "bjj"  , NULL    , T_ILLEGAL,  0, true );
  67.177 -  def(_putfield            , "putfield"            , "bjj"  , NULL    , T_ILLEGAL, -2, true );
  67.178 -  def(_invokevirtual       , "invokevirtual"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
  67.179 -  def(_invokespecial       , "invokespecial"       , "bjj"  , NULL    , T_ILLEGAL, -1, true);
  67.180 -  def(_invokestatic        , "invokestatic"        , "bjj"  , NULL    , T_ILLEGAL,  0, true);
  67.181 -  def(_invokeinterface     , "invokeinterface"     , "bjj__", NULL    , T_ILLEGAL, -1, true);
  67.182 -  def(_invokedynamic       , "invokedynamic"       , "bjjjj", NULL    , T_ILLEGAL,  0, true );
  67.183 -  def(_new                 , "new"                 , "bii"  , NULL    , T_OBJECT ,  1, true );
  67.184 +  def(_getstatic           , "getstatic"           , "bJJ"  , NULL    , T_ILLEGAL,  1, true );
  67.185 +  def(_putstatic           , "putstatic"           , "bJJ"  , NULL    , T_ILLEGAL, -1, true );
  67.186 +  def(_getfield            , "getfield"            , "bJJ"  , NULL    , T_ILLEGAL,  0, true );
  67.187 +  def(_putfield            , "putfield"            , "bJJ"  , NULL    , T_ILLEGAL, -2, true );
  67.188 +  def(_invokevirtual       , "invokevirtual"       , "bJJ"  , NULL    , T_ILLEGAL, -1, true);
  67.189 +  def(_invokespecial       , "invokespecial"       , "bJJ"  , NULL    , T_ILLEGAL, -1, true);
  67.190 +  def(_invokestatic        , "invokestatic"        , "bJJ"  , NULL    , T_ILLEGAL,  0, true);
  67.191 +  def(_invokeinterface     , "invokeinterface"     , "bJJ__", NULL    , T_ILLEGAL, -1, true);
  67.192 +  def(_invokedynamic       , "invokedynamic"       , "bJJJJ", NULL    , T_ILLEGAL,  0, true );
  67.193 +  def(_new                 , "new"                 , "bkk"  , NULL    , T_OBJECT ,  1, true );
  67.194    def(_newarray            , "newarray"            , "bc"   , NULL    , T_OBJECT ,  0, true );
  67.195 -  def(_anewarray           , "anewarray"           , "bii"  , NULL    , T_OBJECT ,  0, true );
  67.196 +  def(_anewarray           , "anewarray"           , "bkk"  , NULL    , T_OBJECT ,  0, true );
  67.197    def(_arraylength         , "arraylength"         , "b"    , NULL    , T_VOID   ,  0, true );
  67.198    def(_athrow              , "athrow"              , "b"    , NULL    , T_VOID   , -1, true );
  67.199 -  def(_checkcast           , "checkcast"           , "bii"  , NULL    , T_OBJECT ,  0, true );
  67.200 -  def(_instanceof          , "instanceof"          , "bii"  , NULL    , T_INT    ,  0, true );
  67.201 +  def(_checkcast           , "checkcast"           , "bkk"  , NULL    , T_OBJECT ,  0, true );
  67.202 +  def(_instanceof          , "instanceof"          , "bkk"  , NULL    , T_INT    ,  0, true );
  67.203    def(_monitorenter        , "monitorenter"        , "b"    , NULL    , T_VOID   , -1, true );
  67.204    def(_monitorexit         , "monitorexit"         , "b"    , NULL    , T_VOID   , -1, true );
  67.205    def(_wide                , "wide"                , ""     , NULL    , T_VOID   ,  0, false);
  67.206 -  def(_multianewarray      , "multianewarray"      , "biic" , NULL    , T_OBJECT ,  1, true );
  67.207 +  def(_multianewarray      , "multianewarray"      , "bkkc" , NULL    , T_OBJECT ,  1, true );
  67.208    def(_ifnull              , "ifnull"              , "boo"  , NULL    , T_VOID   , -1, false);
  67.209    def(_ifnonnull           , "ifnonnull"           , "boo"  , NULL    , T_VOID   , -1, false);
  67.210    def(_goto_w              , "goto_w"              , "boooo", NULL    , T_VOID   ,  0, false);
  67.211 @@ -378,35 +451,35 @@
  67.212    //  JVM bytecodes
  67.213    //  bytecode               bytecode name           format   wide f.   result tp  stk traps  std code
  67.214  
  67.215 -  def(_fast_agetfield      , "fast_agetfield"      , "bjj"  , NULL    , T_OBJECT ,  0, true , _getfield       );
  67.216 -  def(_fast_bgetfield      , "fast_bgetfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _getfield       );
  67.217 -  def(_fast_cgetfield      , "fast_cgetfield"      , "bjj"  , NULL    , T_CHAR   ,  0, true , _getfield       );
  67.218 -  def(_fast_dgetfield      , "fast_dgetfield"      , "bjj"  , NULL    , T_DOUBLE ,  0, true , _getfield       );
  67.219 -  def(_fast_fgetfield      , "fast_fgetfield"      , "bjj"  , NULL    , T_FLOAT  ,  0, true , _getfield       );
  67.220 -  def(_fast_igetfield      , "fast_igetfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _getfield       );
  67.221 -  def(_fast_lgetfield      , "fast_lgetfield"      , "bjj"  , NULL    , T_LONG   ,  0, true , _getfield       );
  67.222 -  def(_fast_sgetfield      , "fast_sgetfield"      , "bjj"  , NULL    , T_SHORT  ,  0, true , _getfield       );
  67.223 +  def(_fast_agetfield      , "fast_agetfield"      , "bJJ"  , NULL    , T_OBJECT ,  0, true , _getfield       );
  67.224 +  def(_fast_bgetfield      , "fast_bgetfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _getfield       );
  67.225 +  def(_fast_cgetfield      , "fast_cgetfield"      , "bJJ"  , NULL    , T_CHAR   ,  0, true , _getfield       );
  67.226 +  def(_fast_dgetfield      , "fast_dgetfield"      , "bJJ"  , NULL    , T_DOUBLE ,  0, true , _getfield       );
  67.227 +  def(_fast_fgetfield      , "fast_fgetfield"      , "bJJ"  , NULL    , T_FLOAT  ,  0, true , _getfield       );
  67.228 +  def(_fast_igetfield      , "fast_igetfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _getfield       );
  67.229 +  def(_fast_lgetfield      , "fast_lgetfield"      , "bJJ"  , NULL    , T_LONG   ,  0, true , _getfield       );
  67.230 +  def(_fast_sgetfield      , "fast_sgetfield"      , "bJJ"  , NULL    , T_SHORT  ,  0, true , _getfield       );
  67.231  
  67.232 -  def(_fast_aputfield      , "fast_aputfield"      , "bjj"  , NULL    , T_OBJECT ,  0, true , _putfield       );
  67.233 -  def(_fast_bputfield      , "fast_bputfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _putfield       );
  67.234 -  def(_fast_cputfield      , "fast_cputfield"      , "bjj"  , NULL    , T_CHAR   ,  0, true , _putfield       );
  67.235 -  def(_fast_dputfield      , "fast_dputfield"      , "bjj"  , NULL    , T_DOUBLE ,  0, true , _putfield       );
  67.236 -  def(_fast_fputfield      , "fast_fputfield"      , "bjj"  , NULL    , T_FLOAT  ,  0, true , _putfield       );
  67.237 -  def(_fast_iputfield      , "fast_iputfield"      , "bjj"  , NULL    , T_INT    ,  0, true , _putfield       );
  67.238 -  def(_fast_lputfield      , "fast_lputfield"      , "bjj"  , NULL    , T_LONG   ,  0, true , _putfield       );
  67.239 -  def(_fast_sputfield      , "fast_sputfield"      , "bjj"  , NULL    , T_SHORT  ,  0, true , _putfield       );
  67.240 +  def(_fast_aputfield      , "fast_aputfield"      , "bJJ"  , NULL    , T_OBJECT ,  0, true , _putfield       );
  67.241 +  def(_fast_bputfield      , "fast_bputfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _putfield       );
  67.242 +  def(_fast_cputfield      , "fast_cputfield"      , "bJJ"  , NULL    , T_CHAR   ,  0, true , _putfield       );
  67.243 +  def(_fast_dputfield      , "fast_dputfield"      , "bJJ"  , NULL    , T_DOUBLE ,  0, true , _putfield       );
  67.244 +  def(_fast_fputfield      , "fast_fputfield"      , "bJJ"  , NULL    , T_FLOAT  ,  0, true , _putfield       );
  67.245 +  def(_fast_iputfield      , "fast_iputfield"      , "bJJ"  , NULL    , T_INT    ,  0, true , _putfield       );
  67.246 +  def(_fast_lputfield      , "fast_lputfield"      , "bJJ"  , NULL    , T_LONG   ,  0, true , _putfield       );
  67.247 +  def(_fast_sputfield      , "fast_sputfield"      , "bJJ"  , NULL    , T_SHORT  ,  0, true , _putfield       );
  67.248  
  67.249    def(_fast_aload_0        , "fast_aload_0"        , "b"    , NULL    , T_OBJECT ,  1, true , _aload_0        );
  67.250 -  def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_jj" , NULL    , T_INT    ,  1, true , _aload_0        );
  67.251 -  def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_jj" , NULL    , T_OBJECT ,  1, true , _aload_0        );
  67.252 -  def(_fast_faccess_0      , "fast_faccess_0"      , "b_jj" , NULL    , T_OBJECT ,  1, true , _aload_0        );
  67.253 +  def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_JJ" , NULL    , T_INT    ,  1, true , _aload_0        );
  67.254 +  def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ" , NULL    , T_OBJECT ,  1, true , _aload_0        );
  67.255 +  def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ" , NULL    , T_OBJECT ,  1, true , _aload_0        );
  67.256  
  67.257    def(_fast_iload          , "fast_iload"          , "bi"   , NULL    , T_INT    ,  1, false, _iload);
  67.258    def(_fast_iload2         , "fast_iload2"         , "bi_i" , NULL    , T_INT    ,  2, false, _iload);
  67.259    def(_fast_icaload        , "fast_icaload"        , "bi_"  , NULL    , T_INT    ,  0, false, _iload);
  67.260  
  67.261    // Faster method invocation.
  67.262 -  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   );
  67.263 +  def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"  , NULL    , T_ILLEGAL, -1, true, _invokevirtual   );
  67.264  
  67.265    def(_fast_linearswitch   , "fast_linearswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
  67.266    def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , NULL    , T_VOID   , -1, false, _lookupswitch   );
    68.1 --- a/src/share/vm/interpreter/bytecodes.hpp	Thu May 20 08:32:11 2010 -0700
    68.2 +++ b/src/share/vm/interpreter/bytecodes.hpp	Mon May 24 14:15:14 2010 -0700
    68.3 @@ -1,5 +1,5 @@
    68.4  /*
    68.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    68.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    68.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    68.8   *
    68.9   * This code is free software; you can redistribute it and/or modify it
   68.10 @@ -280,17 +280,43 @@
   68.11      number_of_codes
   68.12    };
   68.13  
   68.14 +  // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
   68.15 +  enum Flags {
   68.16 +    // semantic flags:
   68.17 +    _bc_can_trap      = 1<<0,     // bytecode execution can trap or block
   68.18 +    _bc_can_rewrite   = 1<<1,     // bytecode execution has an alternate form
   68.19 +
   68.20 +    // format bits (determined only by the format string):
   68.21 +    _fmt_has_c        = 1<<2,     // constant, such as sipush "bcc"
   68.22 +    _fmt_has_j        = 1<<3,     // constant pool cache index, such as getfield "bjj"
   68.23 +    _fmt_has_k        = 1<<4,     // constant pool index, such as ldc "bk"
   68.24 +    _fmt_has_i        = 1<<5,     // local index, such as iload
   68.25 +    _fmt_has_o        = 1<<6,     // offset, such as ifeq
   68.26 +    _fmt_has_nbo      = 1<<7,     // contains native-order field(s)
   68.27 +    _fmt_has_u2       = 1<<8,     // contains double-byte field(s)
   68.28 +    _fmt_has_u4       = 1<<9,     // contains quad-byte field
   68.29 +    _fmt_not_variable = 1<<10,    // not of variable length (simple or wide)
   68.30 +    _fmt_not_simple   = 1<<11,    // either wide or variable length
   68.31 +    _all_fmt_bits     = (_fmt_not_simple*2 - _fmt_has_c),
   68.32 +
   68.33 +    // Example derived format syndromes:
   68.34 +    _fmt_b      = _fmt_not_variable,
   68.35 +    _fmt_bc     = _fmt_b | _fmt_has_c,
   68.36 +    _fmt_bi     = _fmt_b | _fmt_has_i,
   68.37 +    _fmt_bkk    = _fmt_b | _fmt_has_k | _fmt_has_u2,
   68.38 +    _fmt_bJJ    = _fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo,
   68.39 +    _fmt_bo2    = _fmt_b | _fmt_has_o | _fmt_has_u2,
   68.40 +    _fmt_bo4    = _fmt_b | _fmt_has_o | _fmt_has_u4
   68.41 +  };
   68.42 +
   68.43   private:
   68.44    static bool        _is_initialized;
   68.45    static const char* _name          [number_of_codes];
   68.46 -  static const char* _format        [number_of_codes];
   68.47 -  static const char* _wide_format   [number_of_codes];
   68.48    static BasicType   _result_type   [number_of_codes];
   68.49    static s_char      _depth         [number_of_codes];
   68.50 -  static u_char      _length        [number_of_codes];
   68.51 -  static bool        _can_trap      [number_of_codes];
   68.52 +  static u_char      _lengths       [number_of_codes];
   68.53    static Code        _java_code     [number_of_codes];
   68.54 -  static bool        _can_rewrite   [number_of_codes];
   68.55 +  static jchar       _flags         [(1<<BitsPerByte)*2]; // all second page for wide formats
   68.56  
   68.57    static void        def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap);
   68.58    static void        def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code);
   68.59 @@ -322,24 +348,18 @@
   68.60     static Code       non_breakpoint_code_at(address bcp, methodOop method = NULL);
   68.61  
   68.62    // Bytecode attributes
   68.63 -  static bool        is_defined     (int  code)    { return 0 <= code && code < number_of_codes && _format[code] != NULL; }
   68.64 -  static bool        wide_is_defined(int  code)    { return is_defined(code) && _wide_format[code] != NULL; }
   68.65 +  static bool        is_defined     (int  code)    { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
   68.66 +  static bool        wide_is_defined(int  code)    { return is_defined(code) && flags(code, true) != 0; }
   68.67    static const char* name           (Code code)    { check(code);      return _name          [code]; }
   68.68 -  static const char* format         (Code code)    { check(code);      return _format        [code]; }
   68.69 -  static const char* wide_format    (Code code)    { return _wide_format[code]; }
   68.70    static BasicType   result_type    (Code code)    { check(code);      return _result_type   [code]; }
   68.71    static int         depth          (Code code)    { check(code);      return _depth         [code]; }
   68.72 -  static int         length_for     (Code code)    { return _length[code]; }
   68.73 -  static bool        can_trap       (Code code)    { check(code);      return _can_trap      [code]; }
   68.74 +  static int         length_for     (Code code)    { check(code);      return _lengths       [code] & 0xF; }
   68.75 +  static int         wide_length_for(Code code)    { check(code);      return _lengths       [code] >> 4; }
   68.76 +  static bool        can_trap       (Code code)    { check(code);      return has_all_flags(code, _bc_can_trap, false); }
   68.77    static Code        java_code      (Code code)    { check(code);      return _java_code     [code]; }
   68.78 -  static bool        can_rewrite    (Code code)    { check(code);      return _can_rewrite   [code]; }
   68.79 -  static int         wide_length_for(Code code)    {
   68.80 -    if (!is_defined(code)) {
   68.81 -      return 0;
   68.82 -    }
   68.83 -    const char* wf = wide_format(code);
   68.84 -    return (wf == NULL) ? 0 : (int)strlen(wf);
   68.85 -  }
   68.86 +  static bool        can_rewrite    (Code code)    { check(code);      return has_all_flags(code, _bc_can_rewrite, false); }
   68.87 +  static bool        native_byte_order(Code code)  { check(code);      return has_all_flags(code, _fmt_has_nbo, false); }
   68.88 +  static bool        uses_cp_cache  (Code code)    { check(code);      return has_all_flags(code, _fmt_has_j, false); }
   68.89    // if 'end' is provided, it indicates the end of the code buffer which
   68.90    // should not be read past when parsing.
   68.91    static int         special_length_at(address bcp, address end = NULL);
   68.92 @@ -355,6 +375,16 @@
   68.93  
   68.94    static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
   68.95                                                             || code == _fconst_0 || code == _dconst_0); }
   68.96 +  static int         compute_flags  (const char* format, int more_flags = 0);  // compute the flags
   68.97 +  static int         flags          (int code, bool is_wide) {
   68.98 +    assert(code == (u_char)code, "must be a byte");
   68.99 +    return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
  68.100 +  }
  68.101 +  static int         format_bits    (Code code, bool is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
  68.102 +  static bool        has_all_flags  (Code code, int test_flags, bool is_wide) {
  68.103 +    return (flags(code, is_wide) & test_flags) == test_flags;
  68.104 +  }
  68.105 +
  68.106    // Initialization
  68.107    static void        initialize     ();
  68.108  };
    69.1 --- a/src/share/vm/interpreter/interpreter.cpp	Thu May 20 08:32:11 2010 -0700
    69.2 +++ b/src/share/vm/interpreter/interpreter.cpp	Mon May 24 14:15:14 2010 -0700
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    69.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -226,8 +226,9 @@
   69.11  // not yet been executed (in Java semantics, not in actual operation).
   69.12  bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
   69.13    address bcp = method->bcp_from(bci);
   69.14 +  Bytecodes::Code code = Bytecodes::code_at(bcp, method());
   69.15  
   69.16 -  if (!Bytecode_at(bcp)->must_rewrite()) {
   69.17 +  if (!Bytecode_at(bcp)->must_rewrite(code)) {
   69.18      // might have been reached
   69.19      return false;
   69.20    }
    70.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu May 20 08:32:11 2010 -0700
    70.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Mon May 24 14:15:14 2010 -0700
    70.3 @@ -63,7 +63,7 @@
    70.4  IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
    70.5    // access constant pool
    70.6    constantPoolOop pool = method(thread)->constants();
    70.7 -  int index = wide ? two_byte_index(thread) : one_byte_index(thread);
    70.8 +  int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
    70.9    constantTag tag = pool->tag_at(index);
   70.10  
   70.11    if (tag.is_unresolved_klass() || tag.is_klass()) {
   70.12 @@ -135,7 +135,7 @@
   70.13  IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
   70.14    // We may want to pass in more arguments - could make this slightly faster
   70.15    constantPoolOop constants = method(thread)->constants();
   70.16 -  int          i = two_byte_index(thread);
   70.17 +  int          i = get_index_u2(thread, Bytecodes::_multianewarray);
   70.18    klassOop klass = constants->klass_at(i, CHECK);
   70.19    int   nof_dims = number_of_dimensions(thread);
   70.20    assert(oop(klass)->is_klass(), "not a class");
   70.21 @@ -169,7 +169,7 @@
   70.22  // Quicken instance-of and check-cast bytecodes
   70.23  IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
   70.24    // Force resolving; quicken the bytecode
   70.25 -  int which = two_byte_index(thread);
   70.26 +  int which = get_index_u2(thread, Bytecodes::_checkcast);
   70.27    constantPoolOop cpool = method(thread)->constants();
   70.28    // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
   70.29    // program we might have seen an unquick'd bytecode in the interpreter but have another
   70.30 @@ -463,7 +463,7 @@
   70.31  
   70.32    {
   70.33      JvmtiHideSingleStepping jhss(thread);
   70.34 -    LinkResolver::resolve_field(info, pool, two_byte_index(thread),
   70.35 +    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
   70.36                                  bytecode, false, CHECK);
   70.37    } // end JvmtiHideSingleStepping
   70.38  
   70.39 @@ -634,7 +634,7 @@
   70.40    {
   70.41      JvmtiHideSingleStepping jhss(thread);
   70.42      LinkResolver::resolve_invoke(info, receiver, pool,
   70.43 -                                 two_byte_index(thread), bytecode, CHECK);
   70.44 +                                 get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
   70.45      if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
   70.46        int retry_count = 0;
   70.47        while (info.resolved_method()->is_old()) {
   70.48 @@ -645,7 +645,7 @@
   70.49                    "Could not resolve to latest version of redefined method");
   70.50          // method is redefined in the middle of resolve so re-try.
   70.51          LinkResolver::resolve_invoke(info, receiver, pool,
   70.52 -                                     two_byte_index(thread), bytecode, CHECK);
   70.53 +                                     get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
   70.54        }
   70.55      }
   70.56    } // end JvmtiHideSingleStepping
   70.57 @@ -704,7 +704,7 @@
   70.58      caller_bci = caller_method->bci_from(caller_bcp);
   70.59      site_index = Bytes::get_native_u4(caller_bcp+1);
   70.60    }
   70.61 -  assert(site_index == four_byte_index(thread), "");
   70.62 +  assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
   70.63    assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
   70.64    // there is a second CPC entries that is of interest; it caches signature info:
   70.65    int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
    71.1 --- a/src/share/vm/interpreter/interpreterRuntime.hpp	Thu May 20 08:32:11 2010 -0700
    71.2 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Mon May 24 14:15:14 2010 -0700
    71.3 @@ -1,5 +1,5 @@
    71.4  /*
    71.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    71.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    71.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.8   *
    71.9   * This code is free software; you can redistribute it and/or modify it
   71.10 @@ -40,9 +40,13 @@
   71.11      return Bytecodes::code_at(bcp(thread), method(thread));
   71.12    }
   71.13    static bool      already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
   71.14 -  static int       one_byte_index(JavaThread *thread)   { return bcp(thread)[1]; }
   71.15 -  static int       two_byte_index(JavaThread *thread)   { return Bytes::get_Java_u2(bcp(thread) + 1); }
   71.16 -  static int       four_byte_index(JavaThread *thread)  { return Bytes::get_native_u4(bcp(thread) + 1); }
   71.17 +  static Bytecode* bytecode(JavaThread *thread)      { return Bytecode_at(bcp(thread)); }
   71.18 +  static int       get_index_u1(JavaThread *thread, Bytecodes::Code bc)
   71.19 +                                                        { return bytecode(thread)->get_index_u1(bc); }
   71.20 +  static int       get_index_u2(JavaThread *thread, Bytecodes::Code bc)
   71.21 +                                                        { return bytecode(thread)->get_index_u2(bc); }
   71.22 +  static int       get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
   71.23 +                                                        { return bytecode(thread)->get_index_u2_cpcache(bc); }
   71.24    static int       number_of_dimensions(JavaThread *thread)  { return bcp(thread)[3]; }
   71.25  
   71.26    static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
    72.1 --- a/src/share/vm/interpreter/rewriter.cpp	Thu May 20 08:32:11 2010 -0700
    72.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Mon May 24 14:15:14 2010 -0700
    72.3 @@ -1,5 +1,5 @@
    72.4  /*
    72.5 - * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
    72.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    72.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.8   *
    72.9   * This code is free software; you can redistribute it and/or modify it
   72.10 @@ -103,16 +103,15 @@
   72.11  
   72.12  
   72.13  // Rewrite a classfile-order CP index into a native-order CPC index.
   72.14 -int Rewriter::rewrite_member_reference(address bcp, int offset) {
   72.15 +void Rewriter::rewrite_member_reference(address bcp, int offset) {
   72.16    address p = bcp + offset;
   72.17    int  cp_index    = Bytes::get_Java_u2(p);
   72.18    int  cache_index = cp_entry_to_cp_cache(cp_index);
   72.19    Bytes::put_native_u2(p, cache_index);
   72.20 -  return cp_index;
   72.21  }
   72.22  
   72.23  
   72.24 -void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
   72.25 +void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
   72.26    address p = bcp + offset;
   72.27    assert(p[-1] == Bytecodes::_invokedynamic, "");
   72.28    int cp_index = Bytes::get_Java_u2(p);
   72.29 @@ -178,7 +177,7 @@
   72.30          case Bytecodes::_lookupswitch   : {
   72.31  #ifndef CC_INTERP
   72.32            Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
   72.33 -          bc->set_code(
   72.34 +          (*bcp) = (
   72.35              bc->number_of_pairs() < BinarySwitchThreshold
   72.36              ? Bytecodes::_fast_linearswitch
   72.37              : Bytecodes::_fast_binaryswitch
   72.38 @@ -197,7 +196,7 @@
   72.39            rewrite_member_reference(bcp, prefix_length+1);
   72.40            break;
   72.41          case Bytecodes::_invokedynamic:
   72.42 -          rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME"));
   72.43 +          rewrite_invokedynamic(bcp, prefix_length+1);
   72.44            break;
   72.45          case Bytecodes::_jsr            : // fall through
   72.46          case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
    73.1 --- a/src/share/vm/interpreter/rewriter.hpp	Thu May 20 08:32:11 2010 -0700
    73.2 +++ b/src/share/vm/interpreter/rewriter.hpp	Mon May 24 14:15:14 2010 -0700
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
    73.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -64,8 +64,8 @@
   73.11    void scan_method(methodOop m);
   73.12    methodHandle rewrite_jsrs(methodHandle m, TRAPS);
   73.13    void rewrite_Object_init(methodHandle m, TRAPS);
   73.14 -  int  rewrite_member_reference(address bcp, int offset);
   73.15 -  void rewrite_invokedynamic(address bcp, int offset, int cp_index);
   73.16 +  void rewrite_member_reference(address bcp, int offset);
   73.17 +  void rewrite_invokedynamic(address bcp, int offset);
   73.18  
   73.19   public:
   73.20    // Driver routine:
    74.1 --- a/src/share/vm/interpreter/templateTable.cpp	Thu May 20 08:32:11 2010 -0700
    74.2 +++ b/src/share/vm/interpreter/templateTable.cpp	Mon May 24 14:15:14 2010 -0700
    74.3 @@ -1,5 +1,5 @@
    74.4  /*
    74.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    74.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    74.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.8   *
    74.9   * This code is free software; you can redistribute it and/or modify it
   74.10 @@ -434,15 +434,15 @@
   74.11    def(Bytecodes::_dreturn             , ____|disp|clvm|____, dtos, dtos, _return             , dtos         );
   74.12    def(Bytecodes::_areturn             , ____|disp|clvm|____, atos, atos, _return             , atos         );
   74.13    def(Bytecodes::_return              , ____|disp|clvm|____, vtos, vtos, _return             , vtos         );
   74.14 -  def(Bytecodes::_getstatic           , ubcp|____|clvm|____, vtos, vtos, getstatic           ,  1           );
   74.15 -  def(Bytecodes::_putstatic           , ubcp|____|clvm|____, vtos, vtos, putstatic           ,  2           );
   74.16 -  def(Bytecodes::_getfield            , ubcp|____|clvm|____, vtos, vtos, getfield            ,  1           );
   74.17 -  def(Bytecodes::_putfield            , ubcp|____|clvm|____, vtos, vtos, putfield            ,  2           );
   74.18 -  def(Bytecodes::_invokevirtual       , ubcp|disp|clvm|____, vtos, vtos, invokevirtual       ,  2           );
   74.19 -  def(Bytecodes::_invokespecial       , ubcp|disp|clvm|____, vtos, vtos, invokespecial       ,  1           );
   74.20 -  def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        ,  1           );
   74.21 -  def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     ,  1           );
   74.22 -  def(Bytecodes::_invokedynamic       , ubcp|disp|clvm|____, vtos, vtos, invokedynamic       ,  1           );
   74.23 +  def(Bytecodes::_getstatic           , ubcp|____|clvm|____, vtos, vtos, getstatic           , f1_byte      );
   74.24 +  def(Bytecodes::_putstatic           , ubcp|____|clvm|____, vtos, vtos, putstatic           , f2_byte      );
   74.25 +  def(Bytecodes::_getfield            , ubcp|____|clvm|____, vtos, vtos, getfield            , f1_byte      );
   74.26 +  def(Bytecodes::_putfield            , ubcp|____|clvm|____, vtos, vtos, putfield            , f2_byte      );
   74.27 +  def(Bytecodes::_invokevirtual       , ubcp|disp|clvm|____, vtos, vtos, invokevirtual       , f2_byte      );
   74.28 +  def(Bytecodes::_invokespecial       , ubcp|disp|clvm|____, vtos, vtos, invokespecial       , f1_byte      );
   74.29 +  def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        , f1_byte      );
   74.30 +  def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     , f1_byte      );
   74.31 +  def(Bytecodes::_invokedynamic       , ubcp|disp|clvm|____, vtos, vtos, invokedynamic       , f1_oop       );
   74.32    def(Bytecodes::_new                 , ubcp|____|clvm|____, vtos, atos, _new                ,  _           );
   74.33    def(Bytecodes::_newarray            , ubcp|____|clvm|____, itos, atos, newarray            ,  _           );
   74.34    def(Bytecodes::_anewarray           , ubcp|____|clvm|____, itos, atos, anewarray           ,  _           );
   74.35 @@ -502,7 +502,7 @@
   74.36    def(Bytecodes::_fast_iload2         , ubcp|____|____|____, vtos, itos, fast_iload2         ,  _       );
   74.37    def(Bytecodes::_fast_icaload        , ubcp|____|____|____, vtos, itos, fast_icaload        ,  _       );
   74.38  
   74.39 -  def(Bytecodes::_fast_invokevfinal   , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal   ,  2           );
   74.40 +  def(Bytecodes::_fast_invokevfinal   , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal   , f2_byte      );
   74.41  
   74.42    def(Bytecodes::_fast_linearswitch   , ubcp|disp|____|____, itos, vtos, fast_linearswitch   ,  _           );
   74.43    def(Bytecodes::_fast_binaryswitch   , ubcp|disp|____|____, itos, vtos, fast_binaryswitch   ,  _           );
    75.1 --- a/src/share/vm/interpreter/templateTable.hpp	Thu May 20 08:32:11 2010 -0700
    75.2 +++ b/src/share/vm/interpreter/templateTable.hpp	Mon May 24 14:15:14 2010 -0700
    75.3 @@ -1,5 +1,5 @@
    75.4  /*
    75.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    75.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    75.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.8   *
    75.9   * This code is free software; you can redistribute it and/or modify it
   75.10 @@ -73,6 +73,7 @@
   75.11   public:
   75.12    enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
   75.13    enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
   75.14 +  enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 };  // byte_no codes
   75.15  
   75.16   private:
   75.17    static bool            _is_initialized;        // true if TemplateTable has been initialized
   75.18 @@ -244,13 +245,18 @@
   75.19  
   75.20    static void _return(TosState state);
   75.21  
   75.22 -  static void resolve_cache_and_index(int byte_no, Register cache, Register index);
   75.23 +  static void resolve_cache_and_index(int byte_no,       // one of 1,2,11
   75.24 +                                      Register result ,  // either noreg or output for f1/f2
   75.25 +                                      Register cache,    // output for CP cache
   75.26 +                                      Register index,    // output for CP index
   75.27 +                                      size_t index_size); // one of 1,2,4
   75.28    static void load_invoke_cp_cache_entry(int byte_no,
   75.29                                           Register method,
   75.30                                           Register itable_index,
   75.31                                           Register flags,
   75.32 -                                         bool is_invokevirtual = false,
   75.33 -                                         bool is_virtual_final = false);
   75.34 +                                         bool is_invokevirtual,
   75.35 +                                         bool is_virtual_final,
   75.36 +                                         bool is_invokedynamic);
   75.37    static void load_field_cp_cache_entry(Register obj,
   75.38                                          Register cache,
   75.39                                          Register index,
    76.1 --- a/src/share/vm/memory/iterator.cpp	Thu May 20 08:32:11 2010 -0700
    76.2 +++ b/src/share/vm/memory/iterator.cpp	Mon May 24 14:15:14 2010 -0700
    76.3 @@ -1,5 +1,5 @@
    76.4  /*
    76.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    76.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    76.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.8   *
    76.9   * This code is free software; you can redistribute it and/or modify it
   76.10 @@ -58,8 +58,8 @@
   76.11  }
   76.12  
   76.13  void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   76.14 -  if (!cb->is_nmethod())  return;
   76.15 -  nmethod* nm = (nmethod*) cb;
   76.16 +  nmethod* nm = cb->as_nmethod_or_null();
   76.17 +  if (nm == NULL)  return;
   76.18    if (!nm->test_set_oops_do_mark()) {
   76.19      NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
   76.20      do_newly_marked_nmethod(nm);
   76.21 @@ -74,11 +74,14 @@
   76.22  
   76.23  void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   76.24    if (!_do_marking) {
   76.25 -    NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod())  ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
   76.26 +    nmethod* nm = cb->as_nmethod_or_null();
   76.27 +    NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL)  nm->print_on(tty, "oops_do, unmarked visit\n"));
   76.28      // This assert won't work, since there are lots of mini-passes
   76.29      // (mostly in debug mode) that co-exist with marking phases.
   76.30      //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
   76.31 -    cb->oops_do(_cl);
   76.32 +    if (nm != NULL) {
   76.33 +      nm->oops_do(_cl);
   76.34 +    }
   76.35    } else {
   76.36      MarkingCodeBlobClosure::do_code_blob(cb);
   76.37    }
    77.1 --- a/src/share/vm/oops/constantPoolOop.cpp	Thu May 20 08:32:11 2010 -0700
    77.2 +++ b/src/share/vm/oops/constantPoolOop.cpp	Mon May 24 14:15:14 2010 -0700
    77.3 @@ -1,5 +1,5 @@
    77.4  /*
    77.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    77.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    77.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.8   *
    77.9   * This code is free software; you can redistribute it and/or modify it
   77.10 @@ -297,11 +297,9 @@
   77.11  
   77.12  
   77.13  int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
   77.14 -  // Operand was fetched by a stream using get_Java_u2, yet was stored
   77.15 -  // by Rewriter::rewrite_member_reference in native order.
   77.16 -  // So now we have to fix the damage by swapping back to native order.
   77.17 -  assert((int)(u2)operand == operand, "clean u2");
   77.18 -  int cpc_index = Bytes::swap_u2(operand);
   77.19 +  int cpc_index = operand;
   77.20 +  DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
   77.21 +  assert((int)(u2)cpc_index == cpc_index, "clean u2");
   77.22    int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
   77.23    return member_index;
   77.24  }
    78.1 --- a/src/share/vm/oops/constantPoolOop.hpp	Thu May 20 08:32:11 2010 -0700
    78.2 +++ b/src/share/vm/oops/constantPoolOop.hpp	Mon May 24 14:15:14 2010 -0700
    78.3 @@ -1,5 +1,5 @@
    78.4  /*
    78.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    78.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    78.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.8   *
    78.9   * This code is free software; you can redistribute it and/or modify it
   78.10 @@ -434,6 +434,10 @@
   78.11    // Debugging
   78.12    const char* printable_name_at(int which) PRODUCT_RETURN0;
   78.13  
   78.14 +#ifdef ASSERT
   78.15 +  enum { CPCACHE_INDEX_TAG = 0x10000 };  // helps keep CP cache indices distinct from CP indices
   78.16 +#endif //ASSERT
   78.17 +
   78.18   private:
   78.19  
   78.20    symbolOop impl_name_ref_at(int which, bool uncached);
   78.21 @@ -441,7 +445,7 @@
   78.22    int       impl_klass_ref_index_at(int which, bool uncached);
   78.23    int       impl_name_and_type_ref_index_at(int which, bool uncached);
   78.24  
   78.25 -  int remap_instruction_operand_from_cache(int operand);
   78.26 +  int remap_instruction_operand_from_cache(int operand);  // operand must be biased by CPCACHE_INDEX_TAG
   78.27  
   78.28    // Used while constructing constant pool (only by ClassFileParser)
   78.29    jint klass_index_at(int which) {
    79.1 --- a/src/share/vm/oops/generateOopMap.cpp	Thu May 20 08:32:11 2010 -0700
    79.2 +++ b/src/share/vm/oops/generateOopMap.cpp	Mon May 24 14:15:14 2010 -0700
    79.3 @@ -1,5 +1,5 @@
    79.4  /*
    79.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    79.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    79.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    79.8   *
    79.9   * This code is free software; you can redistribute it and/or modify it
   79.10 @@ -1254,7 +1254,7 @@
   79.11        case Bytecodes::_invokestatic:
   79.12        case Bytecodes::_invokedynamic:
   79.13        case Bytecodes::_invokeinterface:
   79.14 -        int idx = currentBC->get_index_int();
   79.15 +        int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
   79.16          constantPoolOop cp    = method()->constants();
   79.17          int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
   79.18          int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);
   79.19 @@ -1286,7 +1286,7 @@
   79.20        case Bytecodes::_invokestatic:
   79.21        case Bytecodes::_invokedynamic:
   79.22        case Bytecodes::_invokeinterface:
   79.23 -        int idx = currentBC->get_index_int();
   79.24 +        int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
   79.25          constantPoolOop cp    = method()->constants();
   79.26          int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
   79.27          int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);
   79.28 @@ -1356,8 +1356,8 @@
   79.29  
   79.30      case Bytecodes::_ldc2_w:            ppush(vvCTS);               break;
   79.31  
   79.32 -    case Bytecodes::_ldc:               do_ldc(itr->get_index(), itr->bci());    break;
   79.33 -    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_big(), itr->bci());break;
   79.34 +    case Bytecodes::_ldc:               do_ldc(itr->get_index(),    itr->bci()); break;
   79.35 +    case Bytecodes::_ldc_w:             do_ldc(itr->get_index_u2(), itr->bci()); break;
   79.36  
   79.37      case Bytecodes::_iload:
   79.38      case Bytecodes::_fload:             ppload(vCTS, itr->get_index()); break;
   79.39 @@ -1550,17 +1550,17 @@
   79.40      case Bytecodes::_jsr_w:             do_jsr(itr->dest_w());       break;
   79.41  
   79.42      case Bytecodes::_getstatic:         do_field(true,  true,
   79.43 -                                                 itr->get_index_big(),
   79.44 +                                                 itr->get_index_u2_cpcache(),
   79.45                                                   itr->bci()); break;
   79.46 -    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_big(), itr->bci()); break;
   79.47 -    case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_big(), itr->bci()); break;
   79.48 -    case Bytecodes::_putfield:          do_field(false, false, itr->get_index_big(), itr->bci()); break;
   79.49 +    case Bytecodes::_putstatic:         do_field(false, true,  itr->get_index_u2_cpcache(), itr->bci()); break;
   79.50 +    case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_u2_cpcache(), itr->bci()); break;
   79.51 +    case Bytecodes::_putfield:          do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
   79.52  
   79.53      case Bytecodes::_invokevirtual:
   79.54 -    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
   79.55 -    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
   79.56 -    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_int(), itr->bci()); break;
   79.57 -    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
   79.58 +    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
   79.59 +    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_u2_cpcache(), itr->bci()); break;
   79.60 +    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_u4(),         itr->bci()); break;
   79.61 +    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_u2_cpcache(), itr->bci()); break;
   79.62      case Bytecodes::_newarray:
   79.63      case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
   79.64      case Bytecodes::_checkcast:         do_checkcast(); break;
    80.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Thu May 20 08:32:11 2010 -0700
    80.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Mon May 24 14:15:14 2010 -0700
    80.3 @@ -1,5 +1,5 @@
    80.4  /*
    80.5 - * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
    80.6 + * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
    80.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.8   *
    80.9   * This code is free software; you can redistribute it and/or modify it
   80.10 @@ -188,8 +188,8 @@
   80.11      return NULL;
   80.12    }
   80.13  
   80.14 -  // Always inline MethodHandle methods.
   80.15 -  if (callee_method->is_method_handle_invoke())
   80.16 +  // Always inline MethodHandle methods and generated MethodHandle adapters.
   80.17 +  if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter())
   80.18      return NULL;
   80.19  
   80.20    // First check all inlining restrictions which are required for correctness
   80.21 @@ -340,7 +340,7 @@
   80.22      Bytecodes::Code call_bc = iter.cur_bc();
   80.23      // An invokedynamic instruction does not have a klass.
   80.24      if (call_bc != Bytecodes::_invokedynamic) {
   80.25 -      int index = iter.get_index_int();
   80.26 +      int index = iter.get_index_u2_cpcache();
   80.27        if (!caller_method->is_klass_loaded(index, true)) {
   80.28          return false;
   80.29        }
    81.1 --- a/src/share/vm/opto/parse2.cpp	Thu May 20 08:32:11 2010 -0700
    81.2 +++ b/src/share/vm/opto/parse2.cpp	Mon May 24 14:15:14 2010 -0700
    81.3 @@ -1317,8 +1317,8 @@
    81.4    case Bytecodes::_iconst_3: push(intcon( 3)); break;
    81.5    case Bytecodes::_iconst_4: push(intcon( 4)); break;
    81.6    case Bytecodes::_iconst_5: push(intcon( 5)); break;
    81.7 -  case Bytecodes::_bipush:   push(intcon( iter().get_byte())); break;
    81.8 -  case Bytecodes::_sipush:   push(intcon( iter().get_short())); break;
    81.9 +  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
   81.10 +  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
   81.11    case Bytecodes::_aconst_null: push(null());  break;
   81.12    case Bytecodes::_ldc:
   81.13    case Bytecodes::_ldc_w:
    82.1 --- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Thu May 20 08:32:11 2010 -0700
    82.2 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Mon May 24 14:15:14 2010 -0700
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
    82.6 + * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -638,7 +638,7 @@
   82.11  
   82.12      // length of bytecode (mnemonic + operands)
   82.13      address bcp = bs.bcp();
   82.14 -    int len = bs.next_bcp() - bcp;
   82.15 +    int     len = bs.instruction_size();
   82.16      assert(len > 0, "length must be > 0");
   82.17  
   82.18      // copy the bytecodes
    83.1 --- a/src/share/vm/prims/methodComparator.cpp	Thu May 20 08:32:11 2010 -0700
    83.2 +++ b/src/share/vm/prims/methodComparator.cpp	Mon May 24 14:15:14 2010 -0700
    83.3 @@ -1,5 +1,5 @@
    83.4  /*
    83.5 - * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
    83.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
    83.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.8   *
    83.9   * This code is free software; you can redistribute it and/or modify it
   83.10 @@ -130,8 +130,8 @@
   83.11    case Bytecodes::_multianewarray : // fall through
   83.12    case Bytecodes::_checkcast      : // fall through
   83.13    case Bytecodes::_instanceof     : {
   83.14 -    u2 cpi_old = _s_old->get_index_big();
   83.15 -    u2 cpi_new = _s_new->get_index_big();
   83.16 +    u2 cpi_old = _s_old->get_index_u2();
   83.17 +    u2 cpi_new = _s_new->get_index_u2();
   83.18      if ((_old_cp->klass_at_noresolve(cpi_old) != _new_cp->klass_at_noresolve(cpi_new)))
   83.19          return false;
   83.20      if (c_old == Bytecodes::_multianewarray &&
   83.21 @@ -147,9 +147,10 @@
   83.22    case Bytecodes::_invokevirtual   : // fall through
   83.23    case Bytecodes::_invokespecial   : // fall through
   83.24    case Bytecodes::_invokestatic    : // fall through
   83.25 +  case Bytecodes::_invokedynamic   : // fall through
   83.26    case Bytecodes::_invokeinterface : {
   83.27 -    u2 cpci_old = _s_old->get_index_int();
   83.28 -    u2 cpci_new = _s_new->get_index_int();
   83.29 +    u2 cpci_old = _s_old->has_index_u4() ? _s_old->get_index_u4() : _s_old->get_index_u2();
   83.30 +    u2 cpci_new = _s_new->has_index_u4() ? _s_new->get_index_u4() : _s_new->get_index_u2();
   83.31      // Check if the names of classes, field/method names and signatures at these indexes
   83.32      // are the same. Indices which are really into constantpool cache (rather than constant
   83.33      // pool itself) are accepted by the constantpool query routines below.
   83.34 @@ -167,8 +168,8 @@
   83.35        cpi_old = _s_old->bcp()[1];
   83.36        cpi_new = _s_new->bcp()[1];
   83.37      } else {
   83.38 -      cpi_old = _s_old->get_index_big();
   83.39 -      cpi_new = _s_new->get_index_big();
   83.40 +      cpi_old = _s_old->get_index_u2();
   83.41 +      cpi_new = _s_new->get_index_u2();
   83.42      }
   83.43      constantTag tag_old = _old_cp->tag_at(cpi_old);
   83.44      constantTag tag_new = _new_cp->tag_at(cpi_new);
   83.45 @@ -199,8 +200,8 @@
   83.46    }
   83.47  
   83.48    case Bytecodes::_ldc2_w : {
   83.49 -    u2 cpi_old = _s_old->get_index_big();
   83.50 -    u2 cpi_new = _s_new->get_index_big();
   83.51 +    u2 cpi_old = _s_old->get_index_u2();
   83.52 +    u2 cpi_new = _s_new->get_index_u2();
   83.53      constantTag tag_old = _old_cp->tag_at(cpi_old);
   83.54      constantTag tag_new = _new_cp->tag_at(cpi_new);
   83.55      if (tag_old.value() != tag_new.value())
   83.56 @@ -221,7 +222,7 @@
   83.57      break;
   83.58  
   83.59    case Bytecodes::_sipush    :
   83.60 -    if (_s_old->get_index_big() != _s_new->get_index_big())
   83.61 +    if (_s_old->get_index_u2() != _s_new->get_index_u2())
   83.62        return false;
   83.63      break;
   83.64  
   83.65 @@ -260,8 +261,8 @@
   83.66    case Bytecodes::_ifnonnull : // fall through
   83.67    case Bytecodes::_ifnull    : // fall through
   83.68    case Bytecodes::_jsr       : {
   83.69 -    short old_ofs = (short) _s_old->get_index_big();
   83.70 -    short new_ofs = (short) _s_new->get_index_big();
   83.71 +    short old_ofs = (short) _s_old->get_index_u2();
   83.72 +    short new_ofs = (short) _s_new->get_index_u2();
   83.73      if (_switchable_test) {
   83.74        int old_dest = _s_old->bci() + old_ofs;
   83.75        int new_dest = _s_new->bci() + new_ofs;
   83.76 @@ -285,9 +286,11 @@
   83.77      if (_s_old->is_wide() != _s_new->is_wide())
   83.78        return false;
   83.79      if (! _s_old->is_wide()) {
   83.80 -      if (_s_old->get_index_big() != _s_new->get_index_big())
   83.81 +      // We could use get_index_u1 and get_constant_u1, but it's simpler to grab both bytes at once:
   83.82 +      if (Bytes::get_Java_u2(_s_old->bcp() + 1) != Bytes::get_Java_u2(_s_new->bcp() + 1))
   83.83          return false;
   83.84      } else {
   83.85 +      // We could use get_index_u2 and get_constant_u2, but it's simpler to grab all four bytes at once:
   83.86        if (Bytes::get_Java_u4(_s_old->bcp() + 1) != Bytes::get_Java_u4(_s_new->bcp() + 1))
   83.87          return false;
   83.88      }
   83.89 @@ -357,8 +360,8 @@
   83.90          }
   83.91        }
   83.92      } else { // !_switchable_test, can use fast rough compare
   83.93 -      int len_old = _s_old->next_bcp() - _s_old->bcp();
   83.94 -      int len_new = _s_new->next_bcp() - _s_new->bcp();
   83.95 +      int len_old = _s_old->instruction_size();
   83.96 +      int len_new = _s_new->instruction_size();
   83.97        if (len_old != len_new)
   83.98          return false;
   83.99        if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
    84.1 --- a/src/share/vm/prims/methodHandleWalk.cpp	Thu May 20 08:32:11 2010 -0700
    84.2 +++ b/src/share/vm/prims/methodHandleWalk.cpp	Mon May 24 14:15:14 2010 -0700
    84.3 @@ -732,7 +732,7 @@
    84.4    case Bytecodes::_dreturn:
    84.5    case Bytecodes::_areturn:
    84.6    case Bytecodes::_return:
    84.7 -    assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
    84.8 +    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format");
    84.9      _bytecode.push(op);
   84.10      break;
   84.11  
   84.12 @@ -748,7 +748,7 @@
   84.13    case Bytecodes::_fstore:
   84.14    case Bytecodes::_dstore:
   84.15    case Bytecodes::_astore:
   84.16 -    assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
   84.17 +    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
   84.18      assert((char) index == index, "index does not fit in 8-bit");
   84.19      _bytecode.push(op);
   84.20      _bytecode.push(index);
   84.21 @@ -757,18 +757,18 @@
   84.22    // bii
   84.23    case Bytecodes::_ldc2_w:
   84.24    case Bytecodes::_checkcast:
   84.25 -    assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
   84.26 +    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
   84.27      assert((short) index == index, "index does not fit in 16-bit");
   84.28      _bytecode.push(op);
   84.29      _bytecode.push(index >> 8);
   84.30      _bytecode.push(index);
   84.31      break;
   84.32  
   84.33 -  // bjj
   84.34 +  // bJJ
   84.35    case Bytecodes::_invokestatic:
   84.36    case Bytecodes::_invokespecial:
   84.37    case Bytecodes::_invokevirtual:
   84.38 -    assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
   84.39 +    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
   84.40      assert((short) index == index, "index does not fit in 16-bit");
   84.41      _bytecode.push(op);
   84.42      _bytecode.push(index >> 8);
    85.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Thu May 20 08:32:11 2010 -0700
    85.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Mon May 24 14:15:14 2010 -0700
    85.3 @@ -1435,7 +1435,7 @@
    85.4        // for the rest of its life! Just another racing bug in the life of
    85.5        // fixup_callers_callsite ...
    85.6        //
    85.7 -      RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
    85.8 +      RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
    85.9        iter.next();
   85.10        assert(iter.has_current(), "must have a reloc at java call site");
   85.11        relocInfo::relocType typ = iter.reloc()->type();
   85.12 @@ -2055,11 +2055,11 @@
   85.13    void scan() {
   85.14      while (_index < _table->table_size()) {
   85.15        AdapterHandlerEntry* a = _table->bucket(_index);
   85.16 +      _index++;
   85.17        if (a != NULL) {
   85.18          _current = a;
   85.19          return;
   85.20        }
   85.21 -      _index++;
   85.22      }
   85.23    }
   85.24  
    86.1 --- a/src/share/vm/runtime/vmStructs.cpp	Thu May 20 08:32:11 2010 -0700
    86.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Mon May 24 14:15:14 2010 -0700
    86.3 @@ -1,5 +1,5 @@
    86.4  /*
    86.5 - * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
    86.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
    86.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    86.8   *
    86.9   * This code is free software; you can redistribute it and/or modify it
   86.10 @@ -607,8 +607,6 @@
   86.11    nonstatic_field(CodeBlob,                    _instructions_offset,                          int)                                   \
   86.12    nonstatic_field(CodeBlob,                    _frame_complete_offset,                        int)                                   \
   86.13    nonstatic_field(CodeBlob,                    _data_offset,                                  int)                                   \
   86.14 -  nonstatic_field(CodeBlob,                    _oops_offset,                                  int)                                   \
   86.15 -  nonstatic_field(CodeBlob,                    _oops_length,                                  int)                                   \
   86.16    nonstatic_field(CodeBlob,                    _frame_size,                                   int)                                   \
   86.17    nonstatic_field(CodeBlob,                    _oop_maps,                                     OopMapSet*)                            \
   86.18                                                                                                                                       \
   86.19 @@ -626,6 +624,8 @@
   86.20    nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   86.21    nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
   86.22    nonstatic_field(nmethod,             _stub_offset,                                  int)                                   \
   86.23 +  nonstatic_field(nmethod,             _consts_offset,                                int)                                   \
   86.24 +  nonstatic_field(nmethod,             _oops_offset,                                  int)                                   \
   86.25    nonstatic_field(nmethod,             _scopes_data_offset,                           int)                                   \
   86.26    nonstatic_field(nmethod,             _scopes_pcs_offset,                            int)                                   \
   86.27    nonstatic_field(nmethod,             _dependencies_offset,                          int)                                   \

mercurial