6829193: JSR 292 needs to support SPARC

author      twisti
date        Thu, 29 Apr 2010 06:30:25 -0700
changeset   1858:c640000b7cc1
parent      1834:90acda19b80f
child       1859:ae8f909e5fc7

6829193: JSR 292 needs to support SPARC
Summary: There are unimplemented portions of the HotSpot code for method handles and invokedynamic specific to SPARC.
Reviewed-by: kvn, never, jrose

src/cpu/sparc/vm/assembler_sparc.cpp
src/cpu/sparc/vm/assembler_sparc.hpp
src/cpu/sparc/vm/assembler_sparc.inline.hpp
src/cpu/sparc/vm/interp_masm_sparc.cpp
src/cpu/sparc/vm/interp_masm_sparc.hpp
src/cpu/sparc/vm/interpreter_sparc.cpp
src/cpu/sparc/vm/methodHandles_sparc.cpp
src/cpu/sparc/vm/stubGenerator_sparc.cpp
src/cpu/sparc/vm/templateInterpreter_sparc.cpp
src/cpu/sparc/vm/templateTable_sparc.cpp
src/cpu/x86/vm/assembler_x86.cpp
src/cpu/x86/vm/assembler_x86.hpp
src/cpu/x86/vm/methodHandles_x86.cpp
src/share/vm/prims/methodHandles.hpp
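
The largest chunk below reworks MacroAssembler's regcon_* address-formation
helpers from mutating two-operand forms into pure three-operand forms that
return their result: constant-op-constant folds at assembly time, and any
operand in a register emits exactly one instruction, borrowing the temp when
the requested destination is a constant. A minimal standalone sketch of that
contract, assuming invented stand-ins (RegOrConst, reg, con) for HotSpot's
RegisterOrConstant:

    // Minimal sketch, not HotSpot code: RegOrConst/reg()/con() are invented.
    #include <cstdint>
    #include <cstdio>

    struct RegOrConst {
      bool     is_reg;
      intptr_t value;   // register number if is_reg, else the constant
    };
    static RegOrConst reg(intptr_t r) { RegOrConst x = { true,  r }; return x; }
    static RegOrConst con(intptr_t c) { RegOrConst x = { false, c }; return x; }

    // Models MacroAssembler::regcon_inc_ptr(s1, s2, d, temp): the result is
    // returned, never stored through a by-reference argument.
    RegOrConst regcon_inc_ptr(RegOrConst s1, RegOrConst s2,
                              RegOrConst d, intptr_t temp) {
      if (!s2.is_reg && s2.value == 0)
        return s1;                             // adding zero: pass s1 through
      if (!s1.is_reg && !s2.is_reg)
        return con(s1.value + s2.value);       // constant + constant: fold now
      if (!d.is_reg)
        d = reg(temp);                         // result needs a real register
      printf("  emit: add -> r%ld\n", (long) d.value);  // stands in for add()
      return d;
    }

    int main() {
      RegOrConst folded = regcon_inc_ptr(con(40), con(2), con(0), /*temp=*/9);
      printf("folded: %ld\n", (long) folded.value);  // 42, no code emitted
      regcon_inc_ptr(reg(1), con(8), reg(2), 9);     // emits one add into r2
      return 0;
    }

Returning the result makes call sites such as the lookup_interface_method
change below (itable_offset = regcon_sll_ptr(...)) explicit about where the
side effect lands, instead of hiding it in a by-reference update.
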
     1.1 --- a/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     1.2 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -2333,6 +2333,18 @@
    1.11  #endif
    1.12  
    1.13  
    1.14 +void MacroAssembler::load_sized_value(Address src, Register dst,
    1.15 +                                      size_t size_in_bytes, bool is_signed) {
    1.16 +  switch (size_in_bytes) {
    1.17 +  case  8: ldx(src, dst); break;
    1.18 +  case  4: ld( src, dst); break;
    1.19 +  case  2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
    1.20 +  case  1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
    1.21 +  default: ShouldNotReachHere();
    1.22 +  }
    1.23 +}
    1.24 +
    1.25 +
    1.26  void MacroAssembler::float_cmp( bool is_float, int unordered_result,
    1.27                                  FloatRegister Fa, FloatRegister Fb,
    1.28                                  Register Rresult) {
    1.29 @@ -2625,40 +2637,103 @@
    1.30  }
    1.31  
    1.32  
    1.33 -void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
    1.34 -  assert(dest.register_or_noreg() != G0, "lost side effect");
    1.35 -  if ((src.is_constant() && src.as_constant() == 0) ||
    1.36 -      (src.is_register() && src.as_register() == G0)) {
    1.37 -    // do nothing
    1.38 -  } else if (dest.is_register()) {
    1.39 -    add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
    1.40 -  } else if (src.is_constant()) {
    1.41 -    intptr_t res = dest.as_constant() + src.as_constant();
    1.42 -    dest = RegisterOrConstant(res); // side effect seen by caller
    1.43 +RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
    1.44 +  assert(d.register_or_noreg() != G0, "lost side effect");
    1.45 +  if ((s2.is_constant() && s2.as_constant() == 0) ||
    1.46 +      (s2.is_register() && s2.as_register() == G0)) {
    1.47 +    // Do nothing, just move value.
    1.48 +    if (s1.is_register()) {
    1.49 +      if (d.is_constant())  d = temp;
    1.50 +      mov(s1.as_register(), d.as_register());
    1.51 +      return d;
    1.52 +    } else {
    1.53 +      return s1;
    1.54 +    }
    1.55 +  }
    1.56 +
    1.57 +  if (s1.is_register()) {
    1.58 +    assert_different_registers(s1.as_register(), temp);
    1.59 +    if (d.is_constant())  d = temp;
    1.60 +    andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
    1.61 +    return d;
    1.62    } else {
    1.63 -    assert(temp != noreg, "cannot handle constant += register");
    1.64 -    add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
    1.65 -    dest = RegisterOrConstant(temp); // side effect seen by caller
    1.66 +    if (s2.is_register()) {
    1.67 +      assert_different_registers(s2.as_register(), temp);
    1.68 +      if (d.is_constant())  d = temp;
    1.69 +      set(s1.as_constant(), temp);
    1.70 +      andn(temp, s2.as_register(), d.as_register());
    1.71 +      return d;
    1.72 +    } else {
    1.73 +      intptr_t res = s1.as_constant() & ~s2.as_constant();
    1.74 +      return res;
    1.75 +    }
    1.76    }
    1.77  }
    1.78  
    1.79 -void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
    1.80 -  assert(dest.register_or_noreg() != G0, "lost side effect");
    1.81 -  if (!is_simm13(src.constant_or_zero()))
    1.82 -    src = (src.as_constant() & 0xFF);
    1.83 -  if ((src.is_constant() && src.as_constant() == 0) ||
    1.84 -      (src.is_register() && src.as_register() == G0)) {
    1.85 -    // do nothing
    1.86 -  } else if (dest.is_register()) {
    1.87 -    sll_ptr(dest.as_register(), src, dest.as_register());
    1.88 -  } else if (src.is_constant()) {
    1.89 -    intptr_t res = dest.as_constant() << src.as_constant();
    1.90 -    dest = RegisterOrConstant(res); // side effect seen by caller
    1.91 +RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
    1.92 +  assert(d.register_or_noreg() != G0, "lost side effect");
    1.93 +  if ((s2.is_constant() && s2.as_constant() == 0) ||
    1.94 +      (s2.is_register() && s2.as_register() == G0)) {
    1.95 +    // Do nothing, just move value.
    1.96 +    if (s1.is_register()) {
    1.97 +      if (d.is_constant())  d = temp;
    1.98 +      mov(s1.as_register(), d.as_register());
    1.99 +      return d;
   1.100 +    } else {
   1.101 +      return s1;
   1.102 +    }
   1.103 +  }
   1.104 +
   1.105 +  if (s1.is_register()) {
   1.106 +    assert_different_registers(s1.as_register(), temp);
   1.107 +    if (d.is_constant())  d = temp;
   1.108 +    add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
   1.109 +    return d;
   1.110    } else {
   1.111 -    assert(temp != noreg, "cannot handle constant <<= register");
   1.112 -    set(dest.as_constant(), temp);
   1.113 -    sll_ptr(temp, src, temp);
   1.114 -    dest = RegisterOrConstant(temp); // side effect seen by caller
   1.115 +    if (s2.is_register()) {
   1.116 +      assert_different_registers(s2.as_register(), temp);
   1.117 +      if (d.is_constant())  d = temp;
   1.118 +      add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
   1.119 +      return d;
   1.120 +    } else {
   1.121 +      intptr_t res = s1.as_constant() + s2.as_constant();
   1.122 +      return res;
   1.123 +    }
   1.124 +  }
   1.125 +}
   1.126 +
   1.127 +RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
   1.128 +  assert(d.register_or_noreg() != G0, "lost side effect");
   1.129 +  if (!is_simm13(s2.constant_or_zero()))
   1.130 +    s2 = (s2.as_constant() & 0xFF);
   1.131 +  if ((s2.is_constant() && s2.as_constant() == 0) ||
   1.132 +      (s2.is_register() && s2.as_register() == G0)) {
   1.133 +    // Do nothing, just move value.
   1.134 +    if (s1.is_register()) {
   1.135 +      if (d.is_constant())  d = temp;
   1.136 +      mov(s1.as_register(), d.as_register());
   1.137 +      return d;
   1.138 +    } else {
   1.139 +      return s1;
   1.140 +    }
   1.141 +  }
   1.142 +
   1.143 +  if (s1.is_register()) {
   1.144 +    assert_different_registers(s1.as_register(), temp);
   1.145 +    if (d.is_constant())  d = temp;
   1.146 +    sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
   1.147 +    return d;
   1.148 +  } else {
   1.149 +    if (s2.is_register()) {
   1.150 +      assert_different_registers(s2.as_register(), temp);
   1.151 +      if (d.is_constant())  d = temp;
   1.152 +      set(s1.as_constant(), temp);
   1.153 +      sll_ptr(temp, s2.as_register(), d.as_register());
   1.154 +      return d;
   1.155 +    } else {
   1.156 +      intptr_t res = s1.as_constant() << s2.as_constant();
   1.157 +      return res;
   1.158 +    }
   1.159    }
   1.160  }
   1.161  
   1.162 @@ -2708,8 +2783,8 @@
   1.163  
   1.164    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
   1.165    RegisterOrConstant itable_offset = itable_index;
   1.166 -  regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
   1.167 -  regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
   1.168 +  itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
   1.169 +  itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
   1.170    add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
   1.171  
   1.172    // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
   1.173 @@ -2805,7 +2880,7 @@
   1.174  
   1.175    assert_different_registers(sub_klass, super_klass, temp_reg);
   1.176    if (super_check_offset.is_register()) {
   1.177 -    assert_different_registers(sub_klass, super_klass,
   1.178 +    assert_different_registers(sub_klass, super_klass, temp_reg,
   1.179                                 super_check_offset.as_register());
   1.180    } else if (must_load_sco) {
   1.181      assert(temp2_reg != noreg, "supply either a temp or a register offset");
   1.182 @@ -2855,6 +2930,8 @@
   1.183      // The super check offset is always positive...
   1.184      lduw(super_klass, sco_offset, temp2_reg);
   1.185      super_check_offset = RegisterOrConstant(temp2_reg);
    1.186 +    // super_check_offset is a register.
   1.187 +    assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
   1.188    }
   1.189    ld_ptr(sub_klass, super_check_offset, temp_reg);
   1.190    cmp(super_klass, temp_reg);
   1.191 @@ -3014,11 +3091,10 @@
   1.192  }
   1.193  
   1.194  
   1.195 -
   1.196 -
   1.197  void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
   1.198                                                Register temp_reg,
   1.199                                                Label& wrong_method_type) {
   1.200 +  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   1.201    assert_different_registers(mtype_reg, mh_reg, temp_reg);
   1.202    // compare method type against that of the receiver
   1.203    RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
   1.204 @@ -3029,10 +3105,33 @@
   1.205  }
   1.206  
   1.207  
   1.208 -void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
   1.209 +// A method handle has a "vmslots" field which gives the size of its
   1.210 +// argument list in JVM stack slots.  This field is either located directly
   1.211 +// in every method handle, or else is indirectly accessed through the
   1.212 +// method handle's MethodType.  This macro hides the distinction.
   1.213 +void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
   1.214 +                                                Register temp_reg) {
   1.215 +  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   1.216 +  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   1.217 +  // load mh.type.form.vmslots
   1.218 +  if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
   1.219 +    // hoist vmslots into every mh to avoid dependent load chain
   1.220 +    ld(    Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
   1.221 +  } else {
   1.222 +    Register temp2_reg = vmslots_reg;
   1.223 +    ld_ptr(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
   1.224 +    ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
   1.225 +    ld(    Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
   1.226 +  }
   1.227 +}
   1.228 +
   1.229 +
   1.230 +void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
   1.231    assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
   1.232    assert_different_registers(mh_reg, temp_reg);
   1.233  
   1.234 +  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   1.235 +
   1.236    // pick out the interpreted side of the handler
   1.237    ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
   1.238  
   1.239 @@ -3043,17 +3142,18 @@
   1.240    // for the various stubs which take control at this point,
   1.241    // see MethodHandles::generate_method_handle_stub
   1.242  
   1.243 -  // (Can any caller use this delay slot?  If so, add an option for supression.)
   1.244 -  delayed()->nop();
   1.245 +  // Some callers can fill the delay slot.
   1.246 +  if (emit_delayed_nop) {
   1.247 +    delayed()->nop();
   1.248 +  }
   1.249  }
   1.250  
   1.251 +
   1.252  RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
   1.253                                                     int extra_slot_offset) {
   1.254    // cf. TemplateTable::prepare_invoke(), if (load_receiver).
   1.255 -  int stackElementSize = Interpreter::stackElementWords() * wordSize;
   1.256 -  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
   1.257 -  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
   1.258 -  assert(offset1 - offset == stackElementSize, "correct arithmetic");
   1.259 +  int stackElementSize = Interpreter::stackElementSize();
   1.260 +  int offset = extra_slot_offset * stackElementSize;
   1.261    if (arg_slot.is_constant()) {
   1.262      offset += arg_slot.as_constant() * stackElementSize;
   1.263      return offset;
   1.264 @@ -3067,6 +3167,11 @@
   1.265  }
   1.266  
   1.267  
   1.268 +Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
   1.269 +                                         int extra_slot_offset) {
   1.270 +  return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
   1.271 +}
   1.272 +
   1.273  
   1.274  void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   1.275                                            Register temp_reg,
     2.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Apr 29 00:03:40 2010 -0700
     2.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Apr 29 06:30:25 2010 -0700
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     2.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -1380,24 +1380,25 @@
    2.11  
    2.12    // pp 181
    2.13  
    2.14 -  void and3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | rs2(s2) ); }
    2.15 -  void and3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.16 +  void and3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | rs2(s2) ); }
    2.17 +  void and3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.18    void andcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
    2.19    void andcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.20    void andn(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | rs2(s2) ); }
    2.21    void andn(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.22 +  void andn(    Register s1, RegisterOrConstant s2, Register d);
    2.23    void andncc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
    2.24    void andncc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.25 -  void or3(      Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
    2.26 -  void or3(      Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.27 +  void or3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
    2.28 +  void or3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.29    void orcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
    2.30    void orcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.31    void orn(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
    2.32    void orn(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.33    void orncc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
    2.34    void orncc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.35 -  void xor3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
    2.36 -  void xor3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.37 +  void xor3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
    2.38 +  void xor3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.39    void xorcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
    2.40    void xorcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
    2.41    void xnor(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | rs2(s2) ); }
    2.42 @@ -2026,8 +2027,8 @@
    2.43    inline void st_ptr(Register d, Register s1, ByteSize simm13a);
    2.44  #endif
    2.45  
    2.46 -  // ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
    2.47 -  // st_long will perform st for 32 bit VM's and stx for 64 bit VM's
    2.48 +  // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
    2.49 +  // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
    2.50    inline void ld_long(Register s1, Register s2, Register d);
    2.51    inline void ld_long(Register s1, int simm13a, Register d);
    2.52    inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
    2.53 @@ -2038,23 +2039,19 @@
    2.54    inline void st_long(Register d, const Address& a, int offset = 0);
    2.55  
    2.56    // Helpers for address formation.
    2.57 -  // They update the dest in place, whether it is a register or constant.
    2.58 -  // They emit no code at all if src is a constant zero.
    2.59 -  // If dest is a constant and src is a register, the temp argument
    2.60 -  // is required, and becomes the result.
    2.61 -  // If dest is a register and src is a non-simm13 constant,
    2.62 -  // the temp argument is required, and is used to materialize the constant.
    2.63 -  void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
    2.64 -                       Register temp = noreg );
    2.65 -  void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
    2.66 -                       Register temp = noreg );
    2.67 -
    2.68 -  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
    2.69 -    guarantee(Rtemp != noreg, "constant offset overflow");
    2.70 -    if (is_simm13(roc.constant_or_zero()))
    2.71 -      return roc;               // register or short constant
    2.72 -    set(roc.as_constant(), Rtemp);
    2.73 -    return RegisterOrConstant(Rtemp);
    2.74 +  // - They emit only a move if s2 is a constant zero.
    2.75 +  // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
    2.76 +  // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
    2.77 +  RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
    2.78 +  RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
    2.79 +  RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
    2.80 +
    2.81 +  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
    2.82 +    if (is_simm13(src.constant_or_zero()))
    2.83 +      return src;               // register or short constant
    2.84 +    guarantee(temp != noreg, "constant offset overflow");
    2.85 +    set(src.as_constant(), temp);
    2.86 +    return temp;
    2.87    }
    2.88  
    2.89    // --------------------------------------------------
    2.90 @@ -2303,6 +2300,9 @@
    2.91    void lcmp( Register Ra, Register Rb, Register Rresult);
    2.92  #endif
    2.93  
    2.94 +  // Loading values by size and signed-ness
    2.95 +  void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
    2.96 +
    2.97    void float_cmp( bool is_float, int unordered_result,
    2.98                    FloatRegister Fa, FloatRegister Fb,
    2.99                    Register Rresult);
   2.100 @@ -2421,12 +2421,16 @@
   2.101    void check_method_handle_type(Register mtype_reg, Register mh_reg,
   2.102                                  Register temp_reg,
   2.103                                  Label& wrong_method_type);
   2.104 -  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
   2.105 +  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
   2.106 +                                  Register temp_reg);
   2.107 +  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
   2.108    // offset relative to Gargs of argument at tos[arg_slot].
   2.109    // (arg_slot == 0 means the last argument, not the first).
   2.110    RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
   2.111                                       int extra_slot_offset = 0);
   2.112 -
    2.114 +  // Address formed from Gargs plus argument_offset.
   2.114 +  Address            argument_address(RegisterOrConstant arg_slot,
   2.115 +                                      int extra_slot_offset = 0);
   2.116  
   2.117    // Stack overflow checking
   2.118  
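
The cleaned-up ensure_simm13_or_reg() above now tests the value before
demanding a temp register. The predicate it relies on is the SPARC 13-bit
signed immediate range: arithmetic and memory instructions carry a simm13
field, so constants outside [-4096, 4095] must first be materialized with
set() into a temp. A sketch of that check:

    #include <cassert>

    inline bool is_simm13(long x) { return -4096 <= x && x <= 4095; }

    int main() {
      assert( is_simm13(4095) && is_simm13(-4096));
      assert(!is_simm13(4096));   // too big: needs set(constant, temp) first
      return 0;
    }
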
     3.1 --- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Apr 29 00:03:40 2010 -0700
     3.2 +++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Thu Apr 29 06:30:25 2010 -0700
     3.3 @@ -206,12 +206,17 @@
     3.4  inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
     3.5  
     3.6  // form effective addresses this way:
     3.7 -inline void Assembler::add(   Register s1, RegisterOrConstant s2, Register d, int offset) {
     3.8 -  if (s2.is_register())  add(s1, s2.as_register(), d);
     3.9 +inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
    3.10 +  if (s2.is_register())  add(s1, s2.as_register(),          d);
    3.11    else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
    3.12    if (offset != 0)       add(d,  offset,                    d);
    3.13  }
    3.14  
    3.15 +inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
    3.16 +  if (s2.is_register())  andn(s1, s2.as_register(), d);
    3.17 +  else                   andn(s1, s2.as_constant(), d);
    3.18 +}
    3.19 +
    3.20  inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
    3.21  inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    3.22  
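
The new andn(Register, RegisterOrConstant, Register) overload computes
s1 & ~s2; regcon_andn_ptr uses it in insert_arg_slots/remove_arg_slots
(later in this changeset) to round a byte offset to a 2*wordSize multiple so
SP stays aligned. A worked example of the masking, with wordSize = 8 assumed
as on a 64-bit VM:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t wordSize = 8;                            // assumed LP64
      const intptr_t TwoWordAlignmentMask = 2 * wordSize - 1; // 0xF
      intptr_t offset = -3 * wordSize;              // grow stack by 3 slots
      intptr_t masked = offset & ~TwoWordAlignmentMask;  // andn(offset, mask)
      assert(masked == -32);          // rounded to the next 16-byte multiple
      assert((masked & TwoWordAlignmentMask) == 0);      // SP stays aligned
      return 0;
    }
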
     4.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     4.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     4.3 @@ -814,22 +814,39 @@
     4.4  }
     4.5  
     4.6  
     4.7 -void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
     4.8 +void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
     4.9 +                                                       int bcp_offset, bool giant_index) {
    4.10 +  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
    4.11 +  if (!giant_index) {
    4.12 +    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
    4.13 +  } else {
    4.14 +    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
    4.15 +    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
    4.16 +    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
    4.17 +    xor3(tmp, -1, tmp);  // convert to plain index
    4.18 +  }
    4.19 +}
    4.20 +
    4.21 +
    4.22 +void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
    4.23 +                                                           int bcp_offset, bool giant_index) {
    4.24    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
    4.25    assert_different_registers(cache, tmp);
    4.26    assert_not_delayed();
    4.27 -  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
    4.28 -              // convert from field index to ConstantPoolCacheEntry index
    4.29 -              // and from word index to byte offset
    4.30 +  get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
    4.31 +  // convert from field index to ConstantPoolCacheEntry index and from
    4.32 +  // word index to byte offset
    4.33    sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
    4.34    add(LcpoolCache, tmp, cache);
    4.35  }
    4.36  
    4.37  
    4.38 -void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
    4.39 +void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
    4.40 +                                                               int bcp_offset, bool giant_index) {
    4.41    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
    4.42    assert_different_registers(cache, tmp);
    4.43    assert_not_delayed();
     4.44 +  assert(!giant_index, "NYI");
    4.45    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
    4.46                // convert from field index to ConstantPoolCacheEntry index
    4.47                // and from word index to byte offset
    4.48 @@ -1675,15 +1692,31 @@
    4.49  // Count a virtual call in the bytecodes.
    4.50  
    4.51  void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
    4.52 -                                                     Register scratch) {
    4.53 +                                                     Register scratch,
    4.54 +                                                     bool receiver_can_be_null) {
    4.55    if (ProfileInterpreter) {
    4.56      Label profile_continue;
    4.57  
    4.58      // If no method data exists, go to profile_continue.
    4.59      test_method_data_pointer(profile_continue);
    4.60  
    4.61 +
    4.62 +    Label skip_receiver_profile;
    4.63 +    if (receiver_can_be_null) {
    4.64 +      Label not_null;
    4.65 +      tst(receiver);
    4.66 +      brx(Assembler::notZero, false, Assembler::pt, not_null);
    4.67 +      delayed()->nop();
    4.68 +      // We are making a call.  Increment the count for null receiver.
    4.69 +      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    4.70 +      ba(false, skip_receiver_profile);
    4.71 +      delayed()->nop();
    4.72 +      bind(not_null);
    4.73 +    }
    4.74 +
    4.75      // Record the receiver type.
    4.76      record_klass_in_profile(receiver, scratch, true);
    4.77 +    bind(skip_receiver_profile);
    4.78  
    4.79      // The method data pointer needs to be updated to reflect the new target.
    4.80      update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
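
get_cache_index_at_bcp() above depends on invokedynamic's 4-byte "giant"
index being stored as a bitwise complement, which is exactly what the
decode_secondary_index(~123) == 123 assert pins down; xor3(tmp, -1, tmp)
undoes the complement because x ^ -1 == ~x. The round trip:

    #include <cassert>

    int main() {
      int raw_index = 123;
      int encoded   = ~raw_index;    // how the secondary index is stored
      assert(encoded < 0);           // never collides with a plain 2-byte index
      int decoded   = encoded ^ -1;  // what xor3(tmp, -1, tmp) computes
      assert(decoded == raw_index);
      return 0;
    }
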
     5.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Apr 29 00:03:40 2010 -0700
     5.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Apr 29 06:30:25 2010 -0700
     5.3 @@ -191,8 +191,9 @@
     5.4                                    Register   Rdst,
     5.5                                    setCCOrNot should_set_CC = dont_set_CC );
     5.6  
     5.7 -  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
     5.8 -  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
     5.9 +  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    5.10 +  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    5.11 +  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
    5.12  
    5.13  
    5.14    // common code
    5.15 @@ -304,7 +305,7 @@
    5.16    void profile_not_taken_branch(Register scratch);
    5.17    void profile_call(Register scratch);
    5.18    void profile_final_call(Register scratch);
    5.19 -  void profile_virtual_call(Register receiver, Register scratch);
    5.20 +  void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
    5.21    void profile_ret(TosState state, Register return_bci, Register scratch);
    5.22    void profile_null_seen(Register scratch);
    5.23    void profile_typecheck(Register klass, Register scratch);
     6.1 --- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     6.2 +++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     6.3 @@ -235,19 +235,17 @@
     6.4  }
     6.5  
     6.6  
     6.7 -
     6.8  // Method handle invoker
     6.9  // Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
    6.10  address InterpreterGenerator::generate_method_handle_entry(void) {
    6.11    if (!EnableMethodHandles) {
    6.12      return generate_abstract_entry();
    6.13    }
    6.14 -  return generate_abstract_entry(); //6815692//
    6.15 +
    6.16 +  return MethodHandles::generate_method_handle_interpreter_entry(_masm);
    6.17  }
    6.18  
    6.19  
    6.20 -
    6.21 -
    6.22  //----------------------------------------------------------------------------------------------------
    6.23  // Entry points & stack frame layout
    6.24  //
     7.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     7.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     7.3 @@ -1,5 +1,5 @@
     7.4  /*
     7.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     7.6 + * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
     7.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8   *
     7.9   * This code is free software; you can redistribute it and/or modify it
    7.10 @@ -29,6 +29,9 @@
    7.11  
    7.12  address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
    7.13                                                  address interpreted_entry) {
    7.14 +  // Just before the actual machine code entry point, allocate space
    7.15 +  // for a MethodHandleEntry::Data record, so that we can manage everything
    7.16 +  // from one base pointer.
    7.17    __ align(wordSize);
    7.18    address target = __ pc() + sizeof(Data);
    7.19    while (__ pc() < target) {
    7.20 @@ -59,12 +62,876 @@
    7.21  
    7.22  // Code generation
    7.23  address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
    7.24 -  ShouldNotReachHere(); //NYI, 6815692
    7.25 -  return NULL;
    7.26 +  // I5_savedSP: sender SP (must preserve)
    7.27 +  // G4 (Gargs): incoming argument list (must preserve)
    7.28 +  // G5_method:  invoke methodOop; becomes method type.
    7.29 +  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
    7.30 +  // O0, O1: garbage temps, blown away
    7.31 +  Register O0_argslot = O0;
    7.32 +  Register O1_scratch = O1;
    7.33 +
    7.34 +  // emit WrongMethodType path first, to enable back-branch from main path
    7.35 +  Label wrong_method_type;
    7.36 +  __ bind(wrong_method_type);
    7.37 +  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
    7.38 +  __ delayed()->nop();
    7.39 +
    7.40 +  // here's where control starts out:
    7.41 +  __ align(CodeEntryAlignment);
    7.42 +  address entry_point = __ pc();
    7.43 +
    7.44 +  // fetch the MethodType from the method handle into G5_method_type
    7.45 +  {
    7.46 +    Register tem = G5_method;
    7.47 +    assert(tem == G5_method_type, "yes, it's the same register");
    7.48 +    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
    7.49 +      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    7.50 +    }
    7.51 +  }
    7.52 +
    7.53 +  // given the MethodType, find out where the MH argument is buried
    7.54 +  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
    7.55 +  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
    7.56 +  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
    7.57 +
    7.58 +  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
    7.59 +  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    7.60 +
    7.61 +  return entry_point;
    7.62  }
    7.63  
    7.64 +
    7.65 +#ifdef ASSERT
    7.66 +static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
    7.67 +  // Verify that argslot lies within (Gargs, FP].
    7.68 +  Label L_ok, L_bad;
    7.69 +#ifdef _LP64
    7.70 +  __ add(FP, STACK_BIAS, temp_reg);
    7.71 +  __ cmp(argslot_reg, temp_reg);
    7.72 +#else
    7.73 +  __ cmp(argslot_reg, FP);
    7.74 +#endif
    7.75 +  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
    7.76 +  __ delayed()->nop();
    7.77 +  __ cmp(Gargs, argslot_reg);
    7.78 +  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
    7.79 +  __ delayed()->nop();
    7.80 +  __ bind(L_bad);
    7.81 +  __ stop(error_message);
    7.82 +  __ bind(L_ok);
    7.83 +}
    7.84 +#endif
    7.85 +
    7.86 +
    7.87 +// Helper to insert argument slots into the stack.
    7.88 +// arg_slots must be a multiple of stack_move_unit() and <= 0
    7.89 +void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
    7.90 +                                     RegisterOrConstant arg_slots,
    7.91 +                                     int arg_mask,
    7.92 +                                     Register argslot_reg,
    7.93 +                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
    7.94 +  assert(temp3_reg != noreg, "temp3 required");
    7.95 +  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
    7.96 +                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
    7.97 +
    7.98 +#ifdef ASSERT
    7.99 +  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
   7.100 +  if (arg_slots.is_register()) {
   7.101 +    Label L_ok, L_bad;
   7.102 +    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
   7.103 +    __ br(Assembler::greater, false, Assembler::pn, L_bad);
   7.104 +    __ delayed()->nop();
   7.105 +    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
   7.106 +    __ br(Assembler::zero, false, Assembler::pt, L_ok);
   7.107 +    __ delayed()->nop();
   7.108 +    __ bind(L_bad);
   7.109 +    __ stop("assert arg_slots <= 0 and clear low bits");
   7.110 +    __ bind(L_ok);
   7.111 +  } else {
   7.112 +    assert(arg_slots.as_constant() <= 0, "");
   7.113 +    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
   7.114 +  }
   7.115 +#endif // ASSERT
   7.116 +
   7.117 +#ifdef _LP64
   7.118 +  if (arg_slots.is_register()) {
   7.119 +    // Was arg_slots register loaded as signed int?
   7.120 +    Label L_ok;
   7.121 +    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
   7.122 +    __ sra(temp_reg, BitsPerInt, temp_reg);
   7.123 +    __ cmp(arg_slots.as_register(), temp_reg);
   7.124 +    __ br(Assembler::equal, false, Assembler::pt, L_ok);
   7.125 +    __ delayed()->nop();
   7.126 +    __ stop("arg_slots register not loaded as signed int");
   7.127 +    __ bind(L_ok);
   7.128 +  }
   7.129 +#endif
   7.130 +
   7.131 +  // Make space on the stack for the inserted argument(s).
   7.132 +  // Then pull down everything shallower than argslot_reg.
   7.133 +  // The stacked return address gets pulled down with everything else.
   7.134 +  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
   7.135 +  //   sp -= size;
   7.136 +  //   for (temp = sp + size; temp < argslot; temp++)
   7.137 +  //     temp[-size] = temp[0]
   7.138 +  //   argslot -= size;
   7.139 +  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
   7.140 +
   7.141 +  // Keep the stack pointer 2*wordSize aligned.
   7.142 +  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
   7.143 +  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
   7.144 +  __ add(SP, masked_offset, SP);
   7.145 +
   7.146 +  __ mov(Gargs, temp_reg);  // source pointer for copy
   7.147 +  __ add(Gargs, offset, Gargs);
   7.148 +
   7.149 +  {
   7.150 +    Label loop;
   7.151 +    __ bind(loop);
   7.152 +    // pull one word down each time through the loop
   7.153 +    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
   7.154 +    __ st_ptr(temp2_reg, Address(temp_reg, offset));
   7.155 +    __ add(temp_reg, wordSize, temp_reg);
   7.156 +    __ cmp(temp_reg, argslot_reg);
   7.157 +    __ brx(Assembler::less, false, Assembler::pt, loop);
   7.158 +    __ delayed()->nop();  // FILLME
   7.159 +  }
   7.160 +
   7.161 +  // Now move the argslot down, to point to the opened-up space.
   7.162 +  __ add(argslot_reg, offset, argslot_reg);
   7.163 +}
   7.164 +
   7.165 +
   7.166 +// Helper to remove argument slots from the stack.
   7.167 +// arg_slots must be a multiple of stack_move_unit() and >= 0
   7.168 +void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
   7.169 +                                     RegisterOrConstant arg_slots,
   7.170 +                                     Register argslot_reg,
   7.171 +                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
   7.172 +  assert(temp3_reg != noreg, "temp3 required");
   7.173 +  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
   7.174 +                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
   7.175 +
   7.176 +  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
   7.177 +
   7.178 +#ifdef ASSERT
   7.179 +  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
   7.180 +  __ add(argslot_reg, offset, temp2_reg);
   7.181 +  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
   7.182 +  if (arg_slots.is_register()) {
   7.183 +    Label L_ok, L_bad;
   7.184 +    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
   7.185 +    __ br(Assembler::less, false, Assembler::pn, L_bad);
   7.186 +    __ delayed()->nop();
   7.187 +    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
   7.188 +    __ br(Assembler::zero, false, Assembler::pt, L_ok);
   7.189 +    __ delayed()->nop();
   7.190 +    __ bind(L_bad);
   7.191 +    __ stop("assert arg_slots >= 0 and clear low bits");
   7.192 +    __ bind(L_ok);
   7.193 +  } else {
   7.194 +    assert(arg_slots.as_constant() >= 0, "");
   7.195 +    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
   7.196 +  }
   7.197 +#endif // ASSERT
   7.198 +
   7.199 +  // Pull up everything shallower than argslot.
   7.200 +  // Then remove the excess space on the stack.
   7.201 +  // The stacked return address gets pulled up with everything else.
   7.202 +  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
   7.203 +  //   for (temp = argslot-1; temp >= sp; --temp)
   7.204 +  //     temp[size] = temp[0]
   7.205 +  //   argslot += size;
   7.206 +  //   sp += size;
   7.207 +  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
   7.208 +  {
   7.209 +    Label loop;
   7.210 +    __ bind(loop);
   7.211 +    // pull one word up each time through the loop
   7.212 +    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
   7.213 +    __ st_ptr(temp2_reg, Address(temp_reg, offset));
   7.214 +    __ sub(temp_reg, wordSize, temp_reg);
   7.215 +    __ cmp(temp_reg, Gargs);
   7.216 +    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
   7.217 +    __ delayed()->nop();  // FILLME
   7.218 +  }
   7.219 +
   7.220 +  // Now move the argslot up, to point to the just-copied block.
   7.221 +  __ add(Gargs, offset, Gargs);
   7.222 +  // And adjust the argslot address to point at the deletion point.
   7.223 +  __ add(argslot_reg, offset, argslot_reg);
   7.224 +
   7.225 +  // Keep the stack pointer 2*wordSize aligned.
   7.226 +  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
   7.227 +  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
   7.228 +  __ add(SP, masked_offset, SP);
   7.229 +}
   7.230 +
   7.231 +
   7.232 +#ifndef PRODUCT
   7.233 +extern "C" void print_method_handle(oop mh);
   7.234 +void trace_method_handle_stub(const char* adaptername,
   7.235 +                              oop mh) {
   7.236 +#if 0
   7.237 +                              intptr_t* entry_sp,
   7.238 +                              intptr_t* saved_sp,
   7.239 +                              intptr_t* saved_bp) {
   7.240 +  // called as a leaf from native code: do not block the JVM!
   7.241 +  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
   7.242 +  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
   7.243 +  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
   7.244 +         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   7.245 +  if (last_sp != saved_sp)
   7.246 +    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
   7.247 +#endif
   7.248 +
   7.249 +  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
   7.250 +  print_method_handle(mh);
   7.251 +}
   7.252 +#endif // PRODUCT
   7.253 +
   7.254 +
   7.255 +//------------------------------------------------------------------------------
   7.256 +// MethodHandles::generate_method_handle_stub
   7.257 +//
   7.258  // Generate an "entry" field for a method handle.
   7.259  // This determines how the method handle will respond to calls.
   7.260  void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   7.261 -  ShouldNotReachHere(); //NYI, 6815692
   7.262 +  // Here is the register state during an interpreted call,
   7.263 +  // as set up by generate_method_handle_interpreter_entry():
   7.264 +  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
   7.265 +  // - G3: receiver method handle
   7.266 +  // - O5_savedSP: sender SP (must preserve)
   7.267 +
   7.268 +  Register O0_argslot = O0;
   7.269 +  Register O1_scratch = O1;
   7.270 +  Register O2_scratch = O2;
   7.271 +  Register O3_scratch = O3;
   7.272 +  Register G5_index   = G5;
   7.273 +
   7.274 +  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
   7.275 +
   7.276 +  // Some handy addresses:
   7.277 +  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
   7.278 +
   7.279 +  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
   7.280 +
   7.281 +  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
   7.282 +
   7.283 +  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
   7.284 +  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
   7.285 +
   7.286 +  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
   7.287 +  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
   7.288 +  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
   7.289 +
   7.290 +  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
   7.291 +
   7.292 +  if (have_entry(ek)) {
   7.293 +    __ nop();  // empty stubs make SG sick
   7.294 +    return;
   7.295 +  }
   7.296 +
   7.297 +  address interp_entry = __ pc();
   7.298 +  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
   7.299 +
   7.300 +#ifndef PRODUCT
   7.301 +  if (TraceMethodHandles) {
   7.302 +    // save: Gargs, O5_savedSP
   7.303 +    __ save(SP, -16*wordSize, SP);
   7.304 +    __ set((intptr_t) entry_name(ek), O0);
   7.305 +    __ mov(G3_method_handle, O1);
   7.306 +    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
   7.307 +    __ restore(SP, 16*wordSize, SP);
   7.308 +  }
   7.309 +#endif // PRODUCT
   7.310 +
   7.311 +  switch ((int) ek) {
   7.312 +  case _raise_exception:
   7.313 +    {
   7.314 +      // Not a real MH entry, but rather shared code for raising an
   7.315 +      // exception.  Extra local arguments are passed in scratch
   7.316 +      // registers, as required type in O3, failing object (or NULL)
   7.317 +      // in O2, failing bytecode type in O1.
   7.318 +
   7.319 +      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
   7.320 +
   7.321 +      // Push arguments as if coming from the interpreter.
   7.322 +      Register O0_scratch = O0_argslot;
   7.323 +      int stackElementSize = Interpreter::stackElementSize();
   7.324 +
   7.325 +      // Make space on the stack for the arguments.
   7.326 +      __ sub(SP,    4*stackElementSize, SP);
   7.327 +      __ sub(Gargs, 3*stackElementSize, Gargs);
   7.328 +      //__ sub(Lesp,  3*stackElementSize, Lesp);
   7.329 +
   7.330 +      // void raiseException(int code, Object actual, Object required)
   7.331 +      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
   7.332 +      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
   7.333 +      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
   7.334 +
   7.335 +      Label no_method;
   7.336 +      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
   7.337 +      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
   7.338 +      __ ld_ptr(Address(G5_method, 0), G5_method);
   7.339 +      __ tst(G5_method);
   7.340 +      __ brx(Assembler::zero, false, Assembler::pn, no_method);
   7.341 +      __ delayed()->nop();
   7.342 +
   7.343 +      int jobject_oop_offset = 0;
   7.344 +      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
   7.345 +      __ tst(G5_method);
   7.346 +      __ brx(Assembler::zero, false, Assembler::pn, no_method);
   7.347 +      __ delayed()->nop();
   7.348 +
   7.349 +      __ verify_oop(G5_method);
   7.350 +      __ jump_indirect_to(G5_method_fie, O1_scratch);
   7.351 +      __ delayed()->nop();
   7.352 +
   7.353 +      // If we get here, the Java runtime did not do its job of creating the exception.
    7.354 +      // Do something that at least causes a valid throw from the interpreter.
   7.355 +      __ bind(no_method);
   7.356 +      __ unimplemented("_raise_exception no method");
   7.357 +    }
   7.358 +    break;
   7.359 +
   7.360 +  case _invokestatic_mh:
   7.361 +  case _invokespecial_mh:
   7.362 +    {
   7.363 +      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
   7.364 +      __ verify_oop(G5_method);
   7.365 +      // Same as TemplateTable::invokestatic or invokespecial,
   7.366 +      // minus the CP setup and profiling:
   7.367 +      if (ek == _invokespecial_mh) {
   7.368 +        // Must load & check the first argument before entering the target method.
   7.369 +        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
   7.370 +        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
   7.371 +        __ null_check(G3_method_handle);
   7.372 +        __ verify_oop(G3_method_handle);
   7.373 +      }
   7.374 +      __ jump_indirect_to(G5_method_fie, O1_scratch);
   7.375 +      __ delayed()->nop();
   7.376 +    }
   7.377 +    break;
   7.378 +
   7.379 +  case _invokevirtual_mh:
   7.380 +    {
   7.381 +      // Same as TemplateTable::invokevirtual,
   7.382 +      // minus the CP setup and profiling:
   7.383 +
   7.384 +      // Pick out the vtable index and receiver offset from the MH,
   7.385 +      // and then we can discard it:
   7.386 +      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
   7.387 +      __ ldsw(G3_dmh_vmindex, G5_index);
   7.388 +      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
   7.389 +      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
   7.390 +      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
   7.391 +
   7.392 +      // Get receiver klass:
   7.393 +      Register O0_klass = O0_argslot;
   7.394 +      __ load_klass(G3_method_handle, O0_klass);
   7.395 +      __ verify_oop(O0_klass);
   7.396 +
   7.397 +      // Get target methodOop & entry point:
   7.398 +      const int base = instanceKlass::vtable_start_offset() * wordSize;
   7.399 +      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
   7.400 +
   7.401 +      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
   7.402 +      __ add(O0_klass, G5_index, O0_klass);
   7.403 +      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
   7.404 +      __ ld_ptr(vtable_entry_addr, G5_method);
   7.405 +
   7.406 +      __ verify_oop(G5_method);
   7.407 +      __ jump_indirect_to(G5_method_fie, O1_scratch);
   7.408 +      __ delayed()->nop();
   7.409 +    }
   7.410 +    break;
   7.411 +
   7.412 +  case _invokeinterface_mh:
   7.413 +    {
   7.414 +      // Same as TemplateTable::invokeinterface,
   7.415 +      // minus the CP setup and profiling:
   7.416 +      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
   7.417 +      Register O1_intf  = O1_scratch;
   7.418 +      __ ld_ptr(G3_mh_vmtarget, O1_intf);
   7.419 +      __ ldsw(G3_dmh_vmindex, G5_index);
   7.420 +      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
   7.421 +      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
   7.422 +
   7.423 +      // Get receiver klass:
   7.424 +      Register O0_klass = O0_argslot;
   7.425 +      __ load_klass(G3_method_handle, O0_klass);
   7.426 +      __ verify_oop(O0_klass);
   7.427 +
   7.428 +      // Get interface:
   7.429 +      Label no_such_interface;
   7.430 +      __ verify_oop(O1_intf);
   7.431 +      __ lookup_interface_method(O0_klass, O1_intf,
   7.432 +                                 // Note: next two args must be the same:
   7.433 +                                 G5_index, G5_method,
   7.434 +                                 O2_scratch,
   7.435 +                                 O3_scratch,
   7.436 +                                 no_such_interface);
   7.437 +
   7.438 +      __ verify_oop(G5_method);
   7.439 +      __ jump_indirect_to(G5_method_fie, O1_scratch);
   7.440 +      __ delayed()->nop();
   7.441 +
   7.442 +      __ bind(no_such_interface);
   7.443 +      // Throw an exception.
   7.444 +      // For historical reasons, it will be IncompatibleClassChangeError.
   7.445 +      __ unimplemented("not tested yet");
   7.446 +      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
   7.447 +      __ mov(O0_klass, O2_scratch);  // bad receiver
   7.448 +      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
   7.449 +      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
   7.450 +    }
   7.451 +    break;
   7.452 +
   7.453 +  case _bound_ref_mh:
   7.454 +  case _bound_int_mh:
   7.455 +  case _bound_long_mh:
   7.456 +  case _bound_ref_direct_mh:
   7.457 +  case _bound_int_direct_mh:
   7.458 +  case _bound_long_direct_mh:
   7.459 +    {
   7.460 +      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
   7.461 +      BasicType arg_type  = T_ILLEGAL;
   7.462 +      int       arg_mask  = _INSERT_NO_MASK;
   7.463 +      int       arg_slots = -1;
   7.464 +      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
   7.465 +
   7.466 +      // Make room for the new argument:
   7.467 +      __ ldsw(G3_bmh_vmargslot, O0_argslot);
   7.468 +      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
   7.469 +
   7.470 +      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
   7.471 +
   7.472 +      // Store bound argument into the new stack slot:
   7.473 +      __ ld_ptr(G3_bmh_argument, O1_scratch);
   7.474 +      if (arg_type == T_OBJECT) {
   7.475 +        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
   7.476 +      } else {
   7.477 +        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
   7.478 +        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
   7.479 +        if (arg_slots == 2) {
   7.480 +          __ unimplemented("not yet tested");
   7.481 +#ifndef _LP64
   7.482 +          __ signx(O2_scratch, O3_scratch);  // Sign extend
   7.483 +#endif
   7.484 +          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
   7.485 +        } else {
   7.486 +          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
   7.487 +        }
   7.488 +      }
   7.489 +
   7.490 +      if (direct_to_method) {
   7.491 +        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
   7.492 +        __ verify_oop(G5_method);
   7.493 +        __ jump_indirect_to(G5_method_fie, O1_scratch);
   7.494 +        __ delayed()->nop();
   7.495 +      } else {
   7.496 +        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
   7.497 +        __ verify_oop(G3_method_handle);
   7.498 +        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.499 +      }
   7.500 +    }
   7.501 +    break;
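
    The bound-argument cases above do two things: open a fresh stack slot with
    insert_arg_slots, then store the bound reference or unboxed primitive into it. A rough
    C++ model of that sequence, assuming a downward-growing argument area (the SPARC layout
    details are abstracted away):

        #include <cstdint>
        #include <cstring>

        // 'stack_top' is the lowest live word; opening 'slots' words at
        // 'argslot' slides everything below it down, then the bound value
        // goes into the freed slot (the st_ptr/st_long above).
        intptr_t* insert_and_bind(intptr_t* stack_top, intptr_t* argslot,
                                  int slots, intptr_t bound_value) {
            size_t live = (size_t)(argslot - stack_top);
            std::memmove(stack_top - slots, stack_top, live * sizeof(intptr_t));
            intptr_t* freed = argslot - slots;
            *freed = bound_value;
            return freed;
        }
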
   7.502 +
   7.503 +  case _adapter_retype_only:
   7.504 +  case _adapter_retype_raw:
   7.505 +    // Immediately jump to the next MH layer:
   7.506 +    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.507 +    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.508 +    // This is OK when all parameter types widen.
   7.509 +    // It is also OK when a return type narrows.
   7.510 +    break;
   7.511 +
   7.512 +  case _adapter_check_cast:
   7.513 +    {
   7.514 +      // Temps:
   7.515 +      Register G5_klass = G5_index;  // Interesting AMH data.
   7.516 +
   7.517 +      // Check a reference argument before jumping to the next layer of MH:
   7.518 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.519 +      Address vmarg = __ argument_address(O0_argslot);
   7.520 +
   7.521 +      // What class are we casting to?
   7.522 +      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
   7.523 +      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
   7.524 +
   7.525 +      Label done;
   7.526 +      __ ld_ptr(vmarg, O1_scratch);
   7.527 +      __ tst(O1_scratch);
   7.528 +      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
   7.529 +      __ delayed()->nop();
   7.530 +      __ load_klass(O1_scratch, O1_scratch);
   7.531 +
   7.532 +      // Live at this point:
   7.533 +      // - G5_klass        :  klass required by the target method
   7.534 +      // - O1_scratch      :  argument klass to test
   7.535 +      // - G3_method_handle:  adapter method handle
   7.536 +      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
   7.537 +
   7.538 +      // If we get here, the type check failed!
   7.539 +      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
   7.540 +      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
   7.541 +      __ ld_ptr(vmarg, O2_scratch);  // bad object
   7.542 +      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
   7.543 +      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
   7.544 +
   7.545 +      __ bind(done);
   7.546 +      // Get the new MH:
   7.547 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.548 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.549 +    }
   7.550 +    break;
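
    The _adapter_check_cast logic reduces to: null always passes, anything else must be a
    subtype of the klass stored in the adapter's argument field. A toy C++ version of that
    decision (HotSpot's check_klass_subtype additionally consults superclass caches and
    secondary-supers tables):

        struct Klass { const Klass* super; };

        // Null passes untested; otherwise walk the super chain looking
        // for the required klass. Returning false corresponds to the
        // _raise_exception path above.
        bool checkcast_ok(const Klass* arg_klass_or_null, const Klass* required) {
            if (arg_klass_or_null == nullptr) return true;
            for (const Klass* k = arg_klass_or_null; k != nullptr; k = k->super)
                if (k == required) return true;
            return false;
        }
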
   7.551 +
   7.552 +  case _adapter_prim_to_prim:
   7.553 +  case _adapter_ref_to_prim:
   7.554 +    // Handled completely by optimized cases.
   7.555 +    __ stop("init_AdapterMethodHandle should not issue this");
   7.556 +    break;
   7.557 +
   7.558 +  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
   7.559 +//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
   7.560 +  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
   7.561 +  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
   7.562 +    {
   7.563 +      // Perform an in-place conversion to int or an int subword.
   7.564 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.565 +      Address vmarg = __ argument_address(O0_argslot);
   7.566 +      Address value;
   7.567 +      bool value_left_justified = false;
   7.568 +
   7.569 +      switch (ek) {
   7.570 +      case _adapter_opt_i2i:
   7.571 +      case _adapter_opt_l2i:
   7.572 +        __ unimplemented(entry_name(ek));
   7.573 +        value = vmarg;
   7.574 +        break;
   7.575 +      case _adapter_opt_unboxi:
   7.576 +        {
   7.577 +          // Load the value up from the heap.
   7.578 +          __ ld_ptr(vmarg, O1_scratch);
   7.579 +          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
   7.580 +#ifdef ASSERT
   7.581 +          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
   7.582 +            if (is_subword_type(BasicType(bt)))
   7.583 +              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
   7.584 +          }
   7.585 +#endif
   7.586 +          __ null_check(O1_scratch, value_offset);
   7.587 +          value = Address(O1_scratch, value_offset);
   7.588 +#ifdef _BIG_ENDIAN
   7.589 +          // Values stored in objects are packed.
   7.590 +          value_left_justified = true;
   7.591 +#endif
   7.592 +        }
   7.593 +        break;
   7.594 +      default:
   7.595 +        ShouldNotReachHere();
   7.596 +      }
   7.597 +
   7.598 +      // This check is required on _BIG_ENDIAN
   7.599 +      Register G5_vminfo = G5_index;
   7.600 +      __ ldsw(G3_amh_conversion, G5_vminfo);
   7.601 +      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
   7.602 +
   7.603 +      // Original 32-bit vmdata word must be of this form:
   7.604 +      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
   7.605 +      __ lduw(value, O1_scratch);
   7.606 +      if (!value_left_justified)
   7.607 +        __ sll(O1_scratch, G5_vminfo, O1_scratch);
   7.608 +      Label zero_extend, done;
   7.609 +      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
   7.610 +      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
   7.611 +      __ delayed()->nop();
   7.612 +
   7.613 +      // this path is taken for int->byte, int->short
   7.614 +      __ sra(O1_scratch, G5_vminfo, O1_scratch);
   7.615 +      __ ba(false, done);
   7.616 +      __ delayed()->nop();
   7.617 +
   7.618 +      __ bind(zero_extend);
    7.619 +      // this path is taken for int->char
   7.620 +      __ srl(O1_scratch, G5_vminfo, O1_scratch);
   7.621 +
   7.622 +      __ bind(done);
   7.623 +      __ st(O1_scratch, vmarg);
   7.624 +
   7.625 +      // Get the new MH:
   7.626 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.627 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.628 +    }
   7.629 +    break;
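
    The sll/sra (or sll/srl) pair above implements the whole int-to-subword family with one
    stub: assuming vminfo holds 32 minus the target bit width, shifting the value up and then
    back down truncates and re-extends it in place. A self-checking C++ sketch of that
    encoding:

        #include <cassert>
        #include <cstdint>

        int32_t adapt_i2subword(int32_t value, int vminfo, bool is_signed) {
            uint32_t left = (uint32_t)value << vminfo;    // left-justify
            return is_signed ? (int32_t)left >> vminfo    // int->byte, int->short
                             : (int32_t)(left >> vminfo); // int->char
        }

        int main() {
            assert(adapt_i2subword(0x1234ABCD, 24, true)  == (int32_t)0xFFFFFFCD); // ->byte
            assert(adapt_i2subword(0x1234ABCD, 16, false) == 0x0000ABCD);          // ->char
            return 0;
        }
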
   7.630 +
   7.631 +  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
   7.632 +  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
   7.633 +    {
   7.634 +      // Perform an in-place int-to-long or ref-to-long conversion.
   7.635 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.636 +
    7.637 +      // On a big-endian machine we duplicate the slot and store the MSW
   7.638 +      // in the first slot.
   7.639 +      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
   7.640 +
   7.641 +      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
   7.642 +
   7.643 +      Address arg_lsw(O0_argslot, 0);
   7.644 +      Address arg_msw(O0_argslot, -Interpreter::stackElementSize());
   7.645 +
   7.646 +      switch (ek) {
   7.647 +      case _adapter_opt_i2l:
   7.648 +        {
   7.649 +          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
   7.650 +#ifndef _LP64
   7.651 +          __ signx(O2_scratch, O3_scratch);  // Sign extend
   7.652 +#endif
   7.653 +          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
   7.654 +        }
   7.655 +        break;
   7.656 +      case _adapter_opt_unboxl:
   7.657 +        {
   7.658 +          // Load the value up from the heap.
   7.659 +          __ ld_ptr(arg_lsw, O1_scratch);
   7.660 +          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
   7.661 +          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
   7.662 +          __ null_check(O1_scratch, value_offset);
   7.663 +          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
   7.664 +          __ st_long(O2_scratch, arg_msw);
   7.665 +        }
   7.666 +        break;
   7.667 +      default:
   7.668 +        ShouldNotReachHere();
   7.669 +      }
   7.670 +
   7.671 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.672 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.673 +    }
   7.674 +    break;
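
    On 32-bit big-endian SPARC the i2l widening above turns one stack slot into two, with the
    sign-extended most-significant word landing in the newly inserted (lower-addressed) slot.
    A small C++ model of the slot arithmetic:

        #include <cstdint>

        // 'msw_slot' is the slot freshly inserted above 'lsw_slot'.
        void adapt_i2l_slots(int32_t* msw_slot, int32_t* lsw_slot) {
            int64_t wide = (int64_t)*lsw_slot;     // sign extend (ldsw)
            *msw_slot = (int32_t)(wide >> 32);     // MSW first on big-endian
            *lsw_slot = (int32_t)wide;             // LSW stays in place
        }
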
   7.675 +
   7.676 +  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
   7.677 +  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
   7.678 +    {
   7.679 +      // perform an in-place floating primitive conversion
   7.680 +      __ unimplemented(entry_name(ek));
   7.681 +    }
   7.682 +    break;
   7.683 +
   7.684 +  case _adapter_prim_to_ref:
   7.685 +    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
   7.686 +    break;
   7.687 +
   7.688 +  case _adapter_swap_args:
   7.689 +  case _adapter_rot_args:
   7.690 +    // handled completely by optimized cases
   7.691 +    __ stop("init_AdapterMethodHandle should not issue this");
   7.692 +    break;
   7.693 +
   7.694 +  case _adapter_opt_swap_1:
   7.695 +  case _adapter_opt_swap_2:
   7.696 +  case _adapter_opt_rot_1_up:
   7.697 +  case _adapter_opt_rot_1_down:
   7.698 +  case _adapter_opt_rot_2_up:
   7.699 +  case _adapter_opt_rot_2_down:
   7.700 +    {
   7.701 +      int swap_bytes = 0, rotate = 0;
   7.702 +      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
   7.703 +
   7.704 +      // 'argslot' is the position of the first argument to swap.
   7.705 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.706 +      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
   7.707 +
   7.708 +      // 'vminfo' is the second.
   7.709 +      Register O1_destslot = O1_scratch;
   7.710 +      __ ldsw(G3_amh_conversion, O1_destslot);
   7.711 +      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
   7.712 +      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
   7.713 +      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
   7.714 +
   7.715 +      if (!rotate) {
   7.716 +        for (int i = 0; i < swap_bytes; i += wordSize) {
   7.717 +          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
   7.718 +          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
   7.719 +          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
   7.720 +          __ st_ptr(O2_scratch, Address(O1_destslot, i));
   7.721 +        }
   7.722 +      } else {
   7.723 +        // Save the first chunk, which is going to get overwritten.
   7.724 +        switch (swap_bytes) {
   7.725 +        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
    7.726 +        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); // fall-thru
   7.727 +        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
   7.728 +        default: ShouldNotReachHere();
   7.729 +        }
   7.730 +
   7.731 +        if (rotate > 0) {
    7.732 +          // Rotate upward.
   7.733 +          __ sub(O0_argslot, swap_bytes, O0_argslot);
    7.734 +#ifdef ASSERT
   7.735 +          {
   7.736 +            // Verify that argslot > destslot, by at least swap_bytes.
   7.737 +            Label L_ok;
   7.738 +            __ cmp(O0_argslot, O1_destslot);
   7.739 +            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
   7.740 +            __ delayed()->nop();
   7.741 +            __ stop("source must be above destination (upward rotation)");
   7.742 +            __ bind(L_ok);
   7.743 +          }
   7.744 +#endif
   7.745 +          // Work argslot down to destslot, copying contiguous data upwards.
   7.746 +          // Pseudo-code:
   7.747 +          //   argslot  = src_addr - swap_bytes
   7.748 +          //   destslot = dest_addr
   7.749 +          //   while (argslot >= destslot) {
   7.750 +          //     *(argslot + swap_bytes) = *(argslot + 0);
   7.751 +          //     argslot--;
   7.752 +          //   }
   7.753 +          Label loop;
   7.754 +          __ bind(loop);
   7.755 +          __ ld_ptr(Address(O0_argslot, 0), G5_index);
   7.756 +          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
   7.757 +          __ sub(O0_argslot, wordSize, O0_argslot);
   7.758 +          __ cmp(O0_argslot, O1_destslot);
   7.759 +          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
   7.760 +          __ delayed()->nop();  // FILLME
   7.761 +        } else {
   7.762 +          __ add(O0_argslot, swap_bytes, O0_argslot);
    7.763 +#ifdef ASSERT
   7.764 +          {
   7.765 +            // Verify that argslot < destslot, by at least swap_bytes.
   7.766 +            Label L_ok;
   7.767 +            __ cmp(O0_argslot, O1_destslot);
   7.768 +            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
   7.769 +            __ delayed()->nop();
    7.770 +            __ stop("source must be below destination (downward rotation)");
   7.771 +            __ bind(L_ok);
   7.772 +          }
   7.773 +#endif
   7.774 +          // Work argslot up to destslot, copying contiguous data downwards.
   7.775 +          // Pseudo-code:
   7.776 +          //   argslot  = src_addr + swap_bytes
   7.777 +          //   destslot = dest_addr
    7.778 +          //   while (argslot <= destslot) {
   7.779 +          //     *(argslot - swap_bytes) = *(argslot + 0);
   7.780 +          //     argslot++;
   7.781 +          //   }
   7.782 +          Label loop;
   7.783 +          __ bind(loop);
   7.784 +          __ ld_ptr(Address(O0_argslot, 0), G5_index);
   7.785 +          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
   7.786 +          __ add(O0_argslot, wordSize, O0_argslot);
   7.787 +          __ cmp(O0_argslot, O1_destslot);
   7.788 +          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
   7.789 +          __ delayed()->nop();  // FILLME
   7.790 +        }
   7.791 +
   7.792 +        // Store the original first chunk into the destination slot, now free.
   7.793 +        switch (swap_bytes) {
   7.794 +        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
   7.795 +        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
   7.796 +        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
   7.797 +        default: ShouldNotReachHere();
   7.798 +        }
   7.799 +      }
   7.800 +
   7.801 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.802 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.803 +    }
   7.804 +    break;
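
    Both rotation directions above follow the same shape: save the chunk being rotated, slide
    the intervening words over by the chunk size, then drop the saved chunk into the slot that
    opened up. The same logic in plain C++ (word pointers instead of Gargs-relative
    addresses):

        #include <cstdint>
        #include <cstring>

        // 'chunk_words' is swap_bytes / wordSize (1 or 2).
        void rotate_args(intptr_t* src, intptr_t* dst, int chunk_words) {
            intptr_t saved[2];
            std::memcpy(saved, src, chunk_words * sizeof(intptr_t));
            if (src > dst) {   // rotate upward: slide [dst, src) up by chunk
                std::memmove(dst + chunk_words, dst, (src - dst) * sizeof(intptr_t));
            } else {           // rotate downward: slide (src, dst] down by chunk
                std::memmove(src, src + chunk_words, (dst - src) * sizeof(intptr_t));
            }
            std::memcpy(dst, saved, chunk_words * sizeof(intptr_t));
        }
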
   7.805 +
   7.806 +  case _adapter_dup_args:
   7.807 +    {
   7.808 +      // 'argslot' is the position of the first argument to duplicate.
   7.809 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.810 +      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
   7.811 +
    7.812 +      // 'stack_move' is the (negative) number of words to duplicate.
   7.813 +      Register G5_stack_move = G5_index;
   7.814 +      __ ldsw(G3_amh_conversion, G5_stack_move);
   7.815 +      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
   7.816 +
   7.817 +      // Remember the old Gargs (argslot[0]).
   7.818 +      Register O1_oldarg = O1_scratch;
   7.819 +      __ mov(Gargs, O1_oldarg);
   7.820 +
   7.821 +      // Move Gargs down to make room for dups.
   7.822 +      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
   7.823 +      __ add(Gargs, G5_stack_move, Gargs);
   7.824 +
   7.825 +      // Compute the new Gargs (argslot[0]).
   7.826 +      Register O2_newarg = O2_scratch;
   7.827 +      __ mov(Gargs, O2_newarg);
   7.828 +
   7.829 +      // Copy from oldarg[0...] down to newarg[0...]
    7.830 +      // Pseudo-code:
   7.831 +      //   O1_oldarg  = old-Gargs
   7.832 +      //   O2_newarg  = new-Gargs
   7.833 +      //   O0_argslot = argslot
   7.834 +      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
   7.835 +      Label loop;
   7.836 +      __ bind(loop);
   7.837 +      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
   7.838 +      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
   7.839 +      __ add(O0_argslot, wordSize, O0_argslot);
   7.840 +      __ add(O2_newarg,  wordSize, O2_newarg);
   7.841 +      __ cmp(O2_newarg, O1_oldarg);
   7.842 +      __ brx(Assembler::less, false, Assembler::pt, loop);
   7.843 +      __ delayed()->nop();  // FILLME
   7.844 +
   7.845 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.846 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.847 +    }
   7.848 +    break;
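
    The duplication loop above is a straight block copy once Gargs has moved down to expose
    room. Modelled in C++ (memmove stands in for the ascending word-by-word copy, which
    tolerates the same overlap):

        #include <cstdint>
        #include <cstring>

        intptr_t* dup_args(intptr_t* gargs, const intptr_t* argslot, int count) {
            intptr_t* new_gargs = gargs - count;   // Gargs moves down
            std::memmove(new_gargs, argslot, count * sizeof(intptr_t));
            return new_gargs;
        }
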
   7.849 +
   7.850 +  case _adapter_drop_args:
   7.851 +    {
   7.852 +      // 'argslot' is the position of the first argument to nuke.
   7.853 +      __ ldsw(G3_amh_vmargslot, O0_argslot);
   7.854 +      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
   7.855 +
    7.856 +      // 'stack_move' is the number of words to drop.
   7.857 +      Register G5_stack_move = G5_index;
   7.858 +      __ ldsw(G3_amh_conversion, G5_stack_move);
   7.859 +      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
   7.860 +
   7.861 +      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
   7.862 +
   7.863 +      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
   7.864 +      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   7.865 +    }
   7.866 +    break;
   7.867 +
   7.868 +  case _adapter_collect_args:
   7.869 +    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
   7.870 +    break;
   7.871 +
   7.872 +  case _adapter_spread_args:
   7.873 +    // Handled completely by optimized cases.
   7.874 +    __ stop("init_AdapterMethodHandle should not issue this");
   7.875 +    break;
   7.876 +
   7.877 +  case _adapter_opt_spread_0:
   7.878 +  case _adapter_opt_spread_1:
   7.879 +  case _adapter_opt_spread_more:
   7.880 +    {
   7.881 +      // spread an array out into a group of arguments
   7.882 +      __ unimplemented(entry_name(ek));
   7.883 +    }
   7.884 +    break;
   7.885 +
   7.886 +  case _adapter_flyby:
   7.887 +  case _adapter_ricochet:
   7.888 +    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
   7.889 +    break;
   7.890 +
   7.891 +  default:
   7.892 +    ShouldNotReachHere();
   7.893 +  }
   7.894 +
   7.895 +  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
   7.896 +  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
   7.897 +
   7.898 +  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
   7.899  }
     8.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     8.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     8.3 @@ -2917,6 +2917,16 @@
     8.4      // arraycopy stubs used by compilers
     8.5      generate_arraycopy_stubs();
     8.6  
     8.7 +    // generic method handle stubs
     8.8 +    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
     8.9 +      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
    8.10 +           ek < MethodHandles::_EK_LIMIT;
    8.11 +           ek = MethodHandles::EntryKind(1 + (int)ek)) {
    8.12 +        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
    8.13 +        MethodHandles::generate_method_handle_stub(_masm, ek);
    8.14 +      }
    8.15 +    }
    8.16 +
    8.17      // Don't initialize the platform math functions since sparc
    8.18      // doesn't have intrinsics for these operations.
    8.19    }
     9.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
     9.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
     9.3 @@ -151,8 +151,10 @@
     9.4  
     9.5  
     9.6  address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
     9.7 +  TosState incoming_state = state;
     9.8 +
     9.9 +  Label cont;
    9.10    address compiled_entry = __ pc();
    9.11 -  Label cont;
    9.12  
    9.13    address entry = __ pc();
    9.14  #if !defined(_LP64) && defined(COMPILER2)
    9.15 @@ -165,12 +167,11 @@
     9.16  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
    9.17    // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
    9.18  
    9.19 -  if( state == ltos ) {
    9.20 -    __ srl (G1, 0,O1);
    9.21 -    __ srlx(G1,32,O0);
    9.22 +  if (incoming_state == ltos) {
    9.23 +    __ srl (G1,  0, O1);
    9.24 +    __ srlx(G1, 32, O0);
    9.25    }
    9.26 -#endif /* !_LP64 && COMPILER2 */
    9.27 -
    9.28 +#endif // !_LP64 && COMPILER2
    9.29  
    9.30    __ bind(cont);
    9.31  
    9.32 @@ -182,10 +183,17 @@
    9.33  
    9.34    __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
    9.35  
    9.36 -
    9.37 +  Label L_got_cache, L_giant_index;
    9.38    const Register cache = G3_scratch;
    9.39    const Register size  = G1_scratch;
    9.40 +  if (EnableInvokeDynamic) {
    9.41 +    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    9.42 +    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    9.43 +    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    9.44 +    __ delayed()->nop();
    9.45 +  }
    9.46    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
    9.47 +  __ bind(L_got_cache);
    9.48    __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
    9.49                     ConstantPoolCacheEntry::flags_offset(), size);
    9.50    __ and3(size, 0xFF, size);                   // argument size in words
    9.51 @@ -193,6 +201,14 @@
    9.52    __ add(Lesp, size, Lesp);                    // pop arguments
    9.53    __ dispatch_next(state, step);
    9.54  
    9.55 +  // out of the main line of code...
    9.56 +  if (EnableInvokeDynamic) {
    9.57 +    __ bind(L_giant_index);
    9.58 +    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
    9.59 +    __ ba(false, L_got_cache);
    9.60 +    __ delayed()->nop();
    9.61 +  }
    9.62 +
    9.63    return entry;
    9.64  }
    9.65  
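    The L_giant_index detour above exists because invokedynamic carries a 4-byte
    constant-pool-cache index after the opcode where other invokes carry 2 bytes, so
    get_cache_and_index_at_bcp must be told which width to read. A sketch of the two reads,
    assuming the rewritten indices sit at bcp+1 in native byte order:

        #include <cstdint>
        #include <cstring>

        uint32_t cache_index_at_bcp(const uint8_t* bcp, bool giant_index) {
            if (giant_index) {              // invokedynamic: 4-byte index
                uint32_t idx;
                std::memcpy(&idx, bcp + 1, sizeof(idx));
                return idx;
            }
            uint16_t idx;                   // everything else: 2-byte index
            std::memcpy(&idx, bcp + 1, sizeof(idx));
            return idx;
        }
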
    10.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 29 00:03:40 2010 -0700
    10.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 29 06:30:25 2010 -0700
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    10.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -1963,19 +1963,30 @@
   10.11  // ----------------------------------------------------------------------------
   10.12  void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   10.13    assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
   10.14 +  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
   10.15 +
   10.16    // Depends on cpCacheOop layout!
   10.17    const int shift_count = (1 + byte_no)*BitsPerByte;
   10.18    Label resolved;
   10.19  
   10.20 -  __ get_cache_and_index_at_bcp(Rcache, index, 1);
   10.21 -  __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   10.22 -                    ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
   10.23 -
   10.24 -  __ srl(  Lbyte_code, shift_count, Lbyte_code );
   10.25 -  __ and3( Lbyte_code,        0xFF, Lbyte_code );
   10.26 -  __ cmp(  Lbyte_code, (int)bytecode());
   10.27 -  __ br(   Assembler::equal, false, Assembler::pt, resolved);
   10.28 -  __ delayed()->set((int)bytecode(), O1);
   10.29 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   10.30 +  if (is_invokedynamic) {
   10.31 +    // We are resolved if the f1 field contains a non-null CallSite object.
   10.32 +    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   10.33 +              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
   10.34 +    __ tst(Lbyte_code);
   10.35 +    __ br(Assembler::notEqual, false, Assembler::pt, resolved);
   10.36 +    __ delayed()->set((int)bytecode(), O1);
   10.37 +  } else {
   10.38 +    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
   10.39 +              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
   10.40 +
   10.41 +    __ srl(  Lbyte_code, shift_count, Lbyte_code );
   10.42 +    __ and3( Lbyte_code,        0xFF, Lbyte_code );
   10.43 +    __ cmp(  Lbyte_code, (int)bytecode());
   10.44 +    __ br(   Assembler::equal, false, Assembler::pt, resolved);
   10.45 +    __ delayed()->set((int)bytecode(), O1);
   10.46 +  }
   10.47  
   10.48    address entry;
   10.49    switch (bytecode()) {
   10.50 @@ -1987,12 +1998,13 @@
   10.51      case Bytecodes::_invokespecial  : // fall through
   10.52      case Bytecodes::_invokestatic   : // fall through
   10.53      case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
   10.54 +    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
   10.55      default                         : ShouldNotReachHere();                                 break;
   10.56    }
   10.57    // first time invocation - must resolve first
   10.58    __ call_VM(noreg, entry, O1);
   10.59    // Update registers with resolved info
   10.60 -  __ get_cache_and_index_at_bcp(Rcache, index, 1);
   10.61 +  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   10.62    __ bind(resolved);
   10.63  }
   10.64  
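    The split above reflects two different notions of "resolved": an invokedynamic cache entry
    is resolved once f1 holds a non-null CallSite, while the other invokes compare the
    bytecode tag cached in the indices word. Schematically, with illustrative types:

        #include <cstdint>

        struct CpCacheEntry { void* f1; uint32_t indices; };

        bool is_resolved(const CpCacheEntry* e, int bytecode,
                         int shift_count, bool is_invokedynamic) {
            if (is_invokedynamic)
                return e->f1 != nullptr;    // non-null CallSite object
            return ((e->indices >> shift_count) & 0xFF) == (uint32_t)bytecode;
        }
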
   10.65 @@ -3130,7 +3142,42 @@
   10.66      return;
   10.67    }
   10.68  
   10.69 -  __ stop("invokedynamic NYI");//6815692//
   10.70 +  // G5: CallSite object (f1)
   10.71 +  // XX: unused (f2)
   10.72 +  // G3: receiver address
   10.73 +  // XX: flags (unused)
   10.74 +
   10.75 +  Register G5_callsite = G5_method;
   10.76 +  Register Rscratch    = G3_scratch;
   10.77 +  Register Rtemp       = G1_scratch;
   10.78 +  Register Rret        = Lscratch;
   10.79 +
   10.80 +  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
   10.81 +  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
   10.82 +
   10.83 +  __ verify_oop(G5_callsite);
   10.84 +
   10.85 +  // profile this call
   10.86 +  __ profile_call(O4);
   10.87 +
   10.88 +  // get return address
   10.89 +  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
   10.90 +  __ set(table, Rtemp);
   10.91 +  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
   10.92 +  // Make sure we don't need to mask Rret for tosBits after the above shift
   10.93 +  ConstantPoolCacheEntry::verify_tosBits();
   10.94 +  __ sll(Rret, LogBytesPerWord, Rret);
   10.95 +  __ ld_ptr(Rtemp, Rret, Rret);  // get return address
   10.96 +
   10.97 +  __ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
   10.98 +  __ null_check(G3_method_handle);
   10.99 +
   10.100 +  // Adjust Rret first so Llast_SP can be the same as Rret
  10.101 +  __ add(Rret, -frame::pc_return_offset, O7);
  10.102 +  __ add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  10.103 +  __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
  10.104 +  // Record SP so we can remove any stack space allocated by adapter transition
  10.105 +  __ delayed()->mov(SP, Llast_SP);
  10.106  }
  10.107  
  10.108  
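    The return-address computation above relies on the TosState living in the topmost bits of
    the flags word (which is what verify_tosBits asserts), so a bare right shift needs no mask
    before scaling into the return_5 table. Schematically, with the bit position assumed:

        #include <cstdint>

        const int tosBits = 28;   // illustrative; see ConstantPoolCacheEntry

        void* return_address_for(void* const table[], uint32_t flags) {
            uint32_t tos_state = flags >> tosBits;  // srl(Rret, tosBits, Rret)
            return table[tos_state];                // sll + ld_ptr above
        }
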
    11.1 --- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Apr 29 00:03:40 2010 -0700
    11.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Apr 29 06:30:25 2010 -0700
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    11.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -6492,24 +6492,19 @@
   11.11  }
   11.12  
   11.13  void MacroAssembler::load_sized_value(Register dst, Address src,
   11.14 -                                      int size_in_bytes, bool is_signed) {
   11.15 -  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
   11.16 +                                      size_t size_in_bytes, bool is_signed) {
   11.17 +  switch (size_in_bytes) {
   11.18  #ifndef _LP64
   11.19    // For case 8, caller is responsible for manually loading
   11.20    // the second word into another register.
   11.21 -  case ~8:  // fall through:
   11.22 -  case  8:  movl(                dst, src ); break;
   11.23 +  case  8: movl(dst, src); break;
   11.24  #else
   11.25 -  case ~8:  // fall through:
   11.26 -  case  8:  movq(                dst, src ); break;
   11.27 +  case  8: movq(dst, src); break;
   11.28  #endif
   11.29 -  case ~4:  // fall through:
   11.30 -  case  4:  movl(                dst, src ); break;
   11.31 -  case ~2:  load_signed_short(   dst, src ); break;
   11.32 -  case  2:  load_unsigned_short( dst, src ); break;
   11.33 -  case ~1:  load_signed_byte(    dst, src ); break;
   11.34 -  case  1:  load_unsigned_byte(  dst, src ); break;
   11.35 -  default:  ShouldNotReachHere();
   11.36 +  case  4: movl(dst, src); break;
   11.37 +  case  2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
   11.38 +  case  1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
   11.39 +  default: ShouldNotReachHere();
   11.40    }
   11.41  }
   11.42  
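    The rewrite above drops a switch-key trick: the old code folded signedness into the case
    label by XORing the size with -1, so a signed load of n bytes matched case ~n. The new
    form keys on size alone and tests is_signed only where sub-word loads differ. A tiny demo
    of the old keys:

        #include <cstdio>
        #include <initializer_list>

        int main() {
            for (int size : {1, 2, 4, 8}) {
                for (int is_signed : {0, 1}) {
                    int old_key = size ^ (is_signed ? -1 : 0);  // ~size when signed
                    std::printf("size=%d signed=%d -> old case label %d\n",
                                size, is_signed, old_key);
                }
            }
            return 0;
        }
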
   11.43 @@ -7706,6 +7701,7 @@
   11.44  // method handle's MethodType.  This macro hides the distinction.
   11.45  void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
   11.46                                                  Register temp_reg) {
   11.47 +  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   11.48    if (UseCompressedOops)  unimplemented();  // field accesses must decode
   11.49    // load mh.type.form.vmslots
   11.50    if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
    12.1 --- a/src/cpu/x86/vm/assembler_x86.hpp	Thu Apr 29 00:03:40 2010 -0700
    12.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp	Thu Apr 29 06:30:25 2010 -0700
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    12.6 + * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -1511,7 +1511,7 @@
   12.11    void extend_sign(Register hi, Register lo);
   12.12  
   12.13    // Loading values by size and signed-ness
   12.14 -  void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
   12.15 +  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
   12.16  
   12.17    // Support for inc/dec with optimal instruction selection depending on value
   12.18  
    13.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Apr 29 00:03:40 2010 -0700
    13.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Apr 29 06:30:25 2010 -0700
    13.3 @@ -127,7 +127,8 @@
    13.4                                       RegisterOrConstant arg_slots,
    13.5                                       int arg_mask,
    13.6                                       Register rax_argslot,
    13.7 -                                     Register rbx_temp, Register rdx_temp) {
    13.8 +                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
    13.9 +  assert(temp3_reg == noreg, "temp3 not required");
   13.10    assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
   13.11                               (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
   13.12  
   13.13 @@ -185,7 +186,8 @@
   13.14  void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
   13.15                                      RegisterOrConstant arg_slots,
   13.16                                      Register rax_argslot,
   13.17 -                                    Register rbx_temp, Register rdx_temp) {
   13.18 +                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
   13.19 +  assert(temp3_reg == noreg, "temp3 not required");
   13.20    assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
   13.21                               (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
   13.22  
    14.1 --- a/src/share/vm/prims/methodHandles.hpp	Thu Apr 29 00:03:40 2010 -0700
    14.2 +++ b/src/share/vm/prims/methodHandles.hpp	Thu Apr 29 06:30:25 2010 -0700
    14.3 @@ -430,12 +430,12 @@
    14.4                                 RegisterOrConstant arg_slots,
    14.5                                 int arg_mask,
    14.6                                 Register argslot_reg,
    14.7 -                               Register temp_reg, Register temp2_reg);
    14.8 +                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
    14.9  
   14.10    static void remove_arg_slots(MacroAssembler* _masm,
   14.11                                 RegisterOrConstant arg_slots,
   14.12                                 Register argslot_reg,
   14.13 -                               Register temp_reg, Register temp2_reg);
   14.14 +                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
   14.15  };
   14.16  
   14.17  
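    The shared declarations gain a third temp register defaulting to noreg so one signature
    serves both ports: x86 asserts the extra temp stays noreg, SPARC passes a real scratch
    register. The pattern in isolation (Register/noreg are stand-ins here):

        typedef int Register;
        const Register noreg = -1;

        void remove_arg_slots(Register argslot_reg,
                              Register temp_reg, Register temp2_reg,
                              Register temp3_reg = noreg) {
            // x86: assert(temp3_reg == noreg); SPARC: uses temp3_reg.
            (void)argslot_reg; (void)temp_reg; (void)temp2_reg; (void)temp3_reg;
        }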
