src/cpu/sparc/vm/methodHandles_sparc.cpp

changeset 3969:1d7922586cf6
parent    3451:5dbed2f542ff
child     4037:da91efe96a93
     1.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Jul 23 13:04:59 2012 -0700
     1.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Jul 24 10:51:00 2012 -0700
     1.3 @@ -31,452 +31,37 @@
     1.4  
     1.5  #ifdef PRODUCT
     1.6  #define BLOCK_COMMENT(str) /* nothing */
     1.7 +#define STOP(error) stop(error)
     1.8  #else
     1.9  #define BLOCK_COMMENT(str) __ block_comment(str)
    1.10 +#define STOP(error) block_comment(error); __ stop(error)
    1.11  #endif
    1.12  
    1.13  #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    1.14  
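As a side note for readers new to HotSpot stub generators, the following standalone sketch (a mock assembler class invented here, not the real MacroAssembler) shows how a call site written as __ STOP(msg) expands under the two definitions above: a product build emits only the stop, while a debug build first records the message as a block comment so it survives into disassembly listings.

#include <cstdio>

class MockAssembler {
 public:
  void block_comment(const char* s) { std::printf("; %s\n", s); }
  void stop(const char* s)          { std::printf("stop(\"%s\")\n", s); }
};

#define __ masm->

#ifdef PRODUCT   // not defined when compiling this sketch standalone
#define STOP(error) stop(error)                           // __ STOP(e) => just the stop
#else
#define STOP(error) block_comment(error); __ stop(error)  // __ STOP(e) => comment, then stop
#endif

int main() {
  MockAssembler assembler;
  MockAssembler* masm = &assembler;
  __ STOP("damaged frame");   // expands exactly as described in the lead-in
  return 0;
}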
    1.15 -address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
    1.16 -                                                address interpreted_entry) {
    1.17 -  // Just before the actual machine code entry point, allocate space
    1.18 -  // for a MethodHandleEntry::Data record, so that we can manage everything
    1.19 -  // from one base pointer.
    1.20 -  __ align(wordSize);
    1.21 -  address target = __ pc() + sizeof(Data);
    1.22 -  while (__ pc() < target) {
    1.23 -    __ nop();
    1.24 -    __ align(wordSize);
    1.25 -  }
    1.26 -
    1.27 -  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
    1.28 -  me->set_end_address(__ pc());         // set a temporary end_address
    1.29 -  me->set_from_interpreted_entry(interpreted_entry);
    1.30 -  me->set_type_checking_entry(NULL);
    1.31 -
    1.32 -  return (address) me;
    1.33 +// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
    1.34 +static RegisterOrConstant constant(int value) {
    1.35 +  return RegisterOrConstant(value);
    1.36  }
    1.37  
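The "overloading nastiness" the comment refers to is that a bare literal 0 could be taken either as a null Register or as the integer constant zero. The standalone sketch below (deliberately simplified types, not the real HotSpot declarations) reproduces the ambiguity and shows how the named helper pins down the intent at call sites such as __ argument_address(constant(0)) later in this file.

#include <cstdio>

struct RegisterImpl { int encoding; };
typedef RegisterImpl* Register;            // registers are pointer-typed, as in HotSpot

class RegisterOrConstant {
  Register _reg;
  long     _con;
 public:
  RegisterOrConstant(Register r) : _reg(r), _con(0) {}
  RegisterOrConstant(long c)     : _reg(0), _con(c) {}
  bool is_register() const { return _reg != 0; }
  long as_constant() const { return _con; }
};

// The workaround: spell out that '0' means the integer constant zero.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant((long) value);
}

int main() {
  // RegisterOrConstant zero(0);          // ambiguous: 0 converts to Register and to long
  RegisterOrConstant zero = constant(0);  // unambiguous
  std::printf("is_register=%d con=%ld\n", (int) zero.is_register(), zero.as_constant());
  return 0;
}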
    1.38 -MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
    1.39 -                                                address start_addr) {
    1.40 -  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
    1.41 -  assert(me->end_address() == start_addr, "valid ME");
    1.42 -
    1.43 -  // Fill in the real end_address:
    1.44 -  __ align(wordSize);
    1.45 -  me->set_end_address(__ pc());
    1.46 -
    1.47 -  return me;
    1.48 -}
    1.49 -
    1.50 -// stack walking support
    1.51 -
    1.52 -frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
    1.53 -  //RicochetFrame* f = RicochetFrame::from_frame(fr);
    1.54 -  // Cf. is_interpreted_frame path of frame::sender
    1.55 -  intptr_t* younger_sp = fr.sp();
    1.56 -  intptr_t* sp         = fr.sender_sp();
    1.57 -  map->make_integer_regs_unsaved();
    1.58 -  map->shift_window(sp, younger_sp);
    1.59 -  bool this_frame_adjusted_stack = true;  // I5_savedSP is live in this RF
    1.60 -  return frame(sp, younger_sp, this_frame_adjusted_stack);
    1.61 -}
    1.62 -
    1.63 -void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
    1.64 -  ResourceMark rm;
    1.65 -  RicochetFrame* f = RicochetFrame::from_frame(fr);
    1.66 -
    1.67 -  // pick up the argument type descriptor:
    1.68 -  Thread* thread = Thread::current();
    1.69 -  Handle cookie(thread, f->compute_saved_args_layout(true, true));
    1.70 -
    1.71 -  // process fixed part
    1.72 -  blk->do_oop((oop*)f->saved_target_addr());
    1.73 -  blk->do_oop((oop*)f->saved_args_layout_addr());
    1.74 -
    1.75 -  // process variable arguments:
    1.76 -  if (cookie.is_null())  return;  // no arguments to describe
    1.77 -
    1.78 -  // the cookie is actually the invokeExact method for my target
     1.79 -  // its argument signature is what I'm interested in
    1.80 -  assert(cookie->is_method(), "");
    1.81 -  methodHandle invoker(thread, methodOop(cookie()));
    1.82 -  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
    1.83 -  assert(!invoker->is_static(), "must have MH argument");
    1.84 -  int slot_count = invoker->size_of_parameters();
    1.85 -  assert(slot_count >= 1, "must include 'this'");
    1.86 -  intptr_t* base = f->saved_args_base();
    1.87 -  intptr_t* retval = NULL;
    1.88 -  if (f->has_return_value_slot())
    1.89 -    retval = f->return_value_slot_addr();
    1.90 -  int slot_num = slot_count - 1;
    1.91 -  intptr_t* loc = &base[slot_num];
    1.92 -  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
    1.93 -  int arg_num = 0;
    1.94 -  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
    1.95 -    if (ss.at_return_type())  continue;
    1.96 -    BasicType ptype = ss.type();
    1.97 -    if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
    1.98 -    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
    1.99 -    slot_num -= type2size[ptype];
   1.100 -    loc = &base[slot_num];
   1.101 -    bool is_oop = (ptype == T_OBJECT && loc != retval);
   1.102 -    if (is_oop)  blk->do_oop((oop*)loc);
   1.103 -    arg_num += 1;
   1.104 -  }
   1.105 -  assert(slot_num == 0, "must have processed all the arguments");
   1.106 -}
   1.107 -
   1.108 -// Ricochet Frames
   1.109 -const Register MethodHandles::RicochetFrame::L1_continuation      = L1;
   1.110 -const Register MethodHandles::RicochetFrame::L2_saved_target      = L2;
   1.111 -const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3;
   1.112 -const Register MethodHandles::RicochetFrame::L4_saved_args_base   = L4; // cf. Gargs = G4
   1.113 -const Register MethodHandles::RicochetFrame::L5_conversion        = L5;
   1.114 -#ifdef ASSERT
   1.115 -const Register MethodHandles::RicochetFrame::L0_magic_number_1    = L0;
   1.116 -#endif //ASSERT
   1.117 -
   1.118 -oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
   1.119 -  if (read_cache) {
   1.120 -    oop cookie = saved_args_layout();
   1.121 -    if (cookie != NULL)  return cookie;
   1.122 -  }
   1.123 -  oop target = saved_target();
   1.124 -  oop mtype  = java_lang_invoke_MethodHandle::type(target);
   1.125 -  oop mtform = java_lang_invoke_MethodType::form(mtype);
   1.126 -  oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
   1.127 -  if (write_cache)  {
   1.128 -    (*saved_args_layout_addr()) = cookie;
   1.129 -  }
   1.130 -  return cookie;
   1.131 -}
   1.132 -
   1.133 -void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
   1.134 -                                                          // output params:
   1.135 -                                                          int* bounce_offset,
   1.136 -                                                          int* exception_offset,
   1.137 -                                                          int* frame_size_in_words) {
   1.138 -  (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
   1.139 -
   1.140 -  address start = __ pc();
   1.141 -
   1.142 -#ifdef ASSERT
   1.143 -  __ illtrap(0); __ illtrap(0); __ illtrap(0);
   1.144 -  // here's a hint of something special:
   1.145 -  __ set(MAGIC_NUMBER_1, G0);
   1.146 -  __ set(MAGIC_NUMBER_2, G0);
   1.147 -#endif //ASSERT
   1.148 -  __ illtrap(0);  // not reached
   1.149 -
   1.150 -  // Return values are in registers.
   1.151 -  // L1_continuation contains a cleanup continuation we must return
   1.152 -  // to.
   1.153 -
   1.154 -  (*bounce_offset) = __ pc() - start;
   1.155 -  BLOCK_COMMENT("ricochet_blob.bounce");
   1.156 -
   1.157 -  if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
   1.158 -  trace_method_handle(_masm, "return/ricochet_blob.bounce");
   1.159 -
   1.160 -  __ JMP(L1_continuation, 0);
   1.161 -  __ delayed()->nop();
   1.162 -  __ illtrap(0);
   1.163 -
   1.164 -  DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0));
   1.165 -
   1.166 -  (*exception_offset) = __ pc() - start;
   1.167 -  BLOCK_COMMENT("ricochet_blob.exception");
   1.168 -
   1.169 -  // compare this to Interpreter::rethrow_exception_entry, which is parallel code
   1.170 -  // for example, see TemplateInterpreterGenerator::generate_throw_exception
   1.171 -  // Live registers in:
   1.172 -  //   Oexception  (O0): exception
   1.173 -  //   Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr)
   1.174 -  __ verify_oop(Oexception);
   1.175 -
   1.176 -  // Take down the frame.
   1.177 -
   1.178 -  // Cf. InterpreterMacroAssembler::remove_activation.
   1.179 -  leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7);
   1.180 -
   1.181 -  // We are done with this activation frame; find out where to go next.
   1.182 -  // The continuation point will be an exception handler, which expects
   1.183 -  // the following registers set up:
   1.184 -  //
   1.185 -  // Oexception: exception
   1.186 -  // Oissuing_pc: the local call that threw exception
   1.187 -  // Other On: garbage
   1.188 -  // In/Ln:  the contents of the caller's register window
   1.189 -  //
   1.190 -  // We do the required restore at the last possible moment, because we
   1.191 -  // need to preserve some state across a runtime call.
   1.192 -  // (Remember that the caller activation is unknown--it might not be
   1.193 -  // interpreted, so things like Lscratch are useless in the caller.)
   1.194 -  __ mov(Oexception,  Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
   1.195 -  __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
   1.196 -  __ call_VM_leaf(L7_thread_cache,
   1.197 -                  CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
   1.198 -                  G2_thread, Oissuing_pc->after_save());
   1.199 -
    1.200 -  // The caller's SP was adjusted upon method entry to accommodate
   1.201 -  // the callee's non-argument locals. Undo that adjustment.
   1.202 -  __ JMP(O0, 0);                         // return exception handler in caller
   1.203 -  __ delayed()->restore(I5_savedSP, G0, SP);
   1.204 -
   1.205 -  // (same old exception object is already in Oexception; see above)
   1.206 -  // Note that an "issuing PC" is actually the next PC after the call
   1.207 -}
   1.208 -
   1.209 -void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
   1.210 -                                                        Register recv_reg,
   1.211 -                                                        Register argv_reg,
   1.212 -                                                        address return_handler) {
   1.213 -  // does not include the __ save()
   1.214 -  assert(argv_reg == Gargs, "");
   1.215 -  Address G3_mh_vmtarget(   recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
   1.216 -  Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
   1.217 -
   1.218 -  // Create the RicochetFrame.
   1.219 -  // Unlike on x86 we can store all required information in local
   1.220 -  // registers.
   1.221 -  BLOCK_COMMENT("push RicochetFrame {");
   1.222 -  __ set(ExternalAddress(return_handler),          L1_continuation);
   1.223 -  __ load_heap_oop(G3_mh_vmtarget,                 L2_saved_target);
   1.224 -  __ mov(G0,                                       L3_saved_args_layout);
   1.225 -  __ mov(Gargs,                                    L4_saved_args_base);
   1.226 -  __ lduw(G3_amh_conversion,                       L5_conversion);  // 32-bit field
   1.227 -  // I5, I6, I7 are already set up
   1.228 -  DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1,      L0_magic_number_1));
   1.229 -  BLOCK_COMMENT("} RicochetFrame");
   1.230 -}
   1.231 -
   1.232 -void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
   1.233 -                                                        Register recv_reg,
   1.234 -                                                        Register new_sp_reg,
   1.235 -                                                        Register sender_pc_reg) {
   1.236 -  assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place");
   1.237 -  assert(sender_pc_reg == I7, "in a fixed place");
   1.238 -  // does not include the __ ret() & __ restore()
   1.239 -  assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg);
   1.240 -  // Take down the frame.
   1.241 -  // Cf. InterpreterMacroAssembler::remove_activation.
   1.242 -  BLOCK_COMMENT("end_ricochet_frame {");
   1.243 -  if (recv_reg->is_valid())
   1.244 -    __ mov(L2_saved_target, recv_reg);
   1.245 -  BLOCK_COMMENT("} end_ricochet_frame");
   1.246 -}
   1.247 -
   1.248 -// Emit code to verify that FP is pointing at a valid ricochet frame.
   1.249 -#ifndef PRODUCT
   1.250 -enum {
   1.251 -  ARG_LIMIT = 255, SLOP = 45,
   1.252 -  // use this parameter for checking for garbage stack movements:
   1.253 -  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   1.254 -  // the slop defends against false alarms due to fencepost errors
   1.255 -};
   1.256 -#endif
   1.257 -
   1.258 -#ifdef ASSERT
   1.259 -void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   1.260 -  // The stack should look like this:
   1.261 -  //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
   1.262 -  // Check various invariants.
   1.263 -
   1.264 -  Register O7_temp = O7, O5_temp = O5;
   1.265 -
   1.266 -  Label L_ok_1, L_ok_2, L_ok_3, L_ok_4;
   1.267 -  BLOCK_COMMENT("verify_clean {");
   1.268 -  // Magic numbers must check out:
   1.269 -  __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
   1.270 -  __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1);
   1.271 -  __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");
   1.272 -
   1.273 -  __ BIND(L_ok_1);
   1.274 -
   1.275 -  // Arguments pointer must look reasonable:
   1.276 -#ifdef _LP64
   1.277 -  Register FP_temp = O5_temp;
   1.278 -  __ add(FP, STACK_BIAS, FP_temp);
   1.279 -#else
   1.280 -  Register FP_temp = FP;
   1.281 -#endif
   1.282 -  __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2);
   1.283 -  __ stop("damaged ricochet frame: L4 < FP");
   1.284 -
   1.285 -  __ BIND(L_ok_2);
    1.286 -  // Disabled until we decide on its fate
   1.287 -  // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
   1.288 -  // __ cmp(O7_temp, FP_temp);
   1.289 -  // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
   1.290 -  // __ delayed()->nop();
   1.291 -  // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");
   1.292 -
   1.293 -  __ BIND(L_ok_3);
   1.294 -  extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
   1.295 -  __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4);
   1.296 -  extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
   1.297 -  __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
   1.298 -  assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
   1.299 -  __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4);
   1.300 -  __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
   1.301 -  __ BIND(L_ok_4);
   1.302 -  BLOCK_COMMENT("} verify_clean");
   1.303 -}
   1.304 -#endif //ASSERT
   1.305 -
   1.306  void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
   1.307    if (VerifyMethodHandles)
   1.308      verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
   1.309 -                 "AMH argument is a Class");
   1.310 +                 "MH argument is a Class");
   1.311    __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
   1.312  }
   1.313  
   1.314 -void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) {
   1.315 -  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
   1.316 -  assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load");
   1.317 -  __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg);
   1.318 +#ifdef ASSERT
   1.319 +static int check_nonzero(const char* xname, int x) {
   1.320 +  assert(x != 0, err_msg("%s should be nonzero", xname));
   1.321 +  return x;
   1.322  }
   1.323 -
   1.324 -void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
   1.325 -  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
   1.326 -  __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg);
   1.327 -}
   1.328 -
   1.329 -void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
   1.330 -  __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg);
   1.331 -  __ and3(reg, 0x0F, reg);
   1.332 -}
   1.333 -
   1.334 -void MethodHandles::load_stack_move(MacroAssembler* _masm,
   1.335 -                                    Address G3_amh_conversion,
   1.336 -                                    Register stack_move_reg) {
   1.337 -  BLOCK_COMMENT("load_stack_move {");
   1.338 -  __ ldsw(G3_amh_conversion, stack_move_reg);
   1.339 -  __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
   1.340 -#ifdef ASSERT
   1.341 -  if (VerifyMethodHandles) {
   1.342 -    Label L_ok, L_bad;
   1.343 -    int32_t stack_move_limit = 0x0800;  // extra-large
   1.344 -    __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad);
   1.345 -    __ cmp(stack_move_reg, -stack_move_limit);
   1.346 -    __ br(Assembler::greater, false, Assembler::pt, L_ok);
   1.347 -    __ delayed()->nop();
   1.348 -    __ BIND(L_bad);
   1.349 -    __ stop("load_stack_move of garbage value");
   1.350 -    __ BIND(L_ok);
   1.351 -  }
   1.352 -#endif
   1.353 -  BLOCK_COMMENT("} load_stack_move");
   1.354 -}
   1.355 +#define NONZERO(x) check_nonzero(#x, x)
   1.356 +#else //ASSERT
   1.357 +#define NONZERO(x) (x)
   1.358 +#endif //ASSERT
   1.359  
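NONZERO() is wrapped around the java_lang_invoke_* field offsets used further down; those offsets are computed during VM startup, so a zero value almost certainly means the offset was never filled in. A minimal standalone sketch of the pattern, with plain C library calls standing in for HotSpot's assert and err_msg machinery:

#include <stdio.h>
#include <stdlib.h>

#ifdef ASSERT                    // HotSpot's debug-build flag; pass -DASSERT to enable
static int check_nonzero(const char* xname, int x) {
  if (x == 0) {
    fprintf(stderr, "%s should be nonzero\n", xname);
    abort();
  }
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else
#define NONZERO(x) (x)
#endif

// Hypothetical runtime-computed field offset, zero until initialization runs.
static int flags_offset_in_bytes = 0;

int main(void) {
  flags_offset_in_bytes = 20;                // pretend the offset computation ran
  int off = NONZERO(flags_offset_in_bytes);  // would abort in a debug build if still 0
  printf("using offset %d\n", off);
  return 0;
}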
   1.360  #ifdef ASSERT
   1.361 -void MethodHandles::RicochetFrame::verify() const {
   1.362 -  assert(magic_number_1() == MAGIC_NUMBER_1, "");
   1.363 -  if (!Universe::heap()->is_gc_active()) {
   1.364 -    if (saved_args_layout() != NULL) {
   1.365 -      assert(saved_args_layout()->is_method(), "must be valid oop");
   1.366 -    }
   1.367 -    if (saved_target() != NULL) {
   1.368 -      assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
   1.369 -    }
   1.370 -  }
   1.371 -  int conv_op = adapter_conversion_op(conversion());
   1.372 -  assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
   1.373 -         conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
   1.374 -         conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
   1.375 -         "must be a sane conversion");
   1.376 -  if (has_return_value_slot()) {
   1.377 -    assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
   1.378 -  }
   1.379 -}
   1.380 -
   1.381 -void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
   1.382 -  // Verify that argslot lies within (Gargs, FP].
   1.383 -  Label L_ok, L_bad;
   1.384 -  BLOCK_COMMENT("verify_argslot {");
   1.385 -  __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
   1.386 -  __ add(FP, STACK_BIAS, temp_reg);  // STACK_BIAS is zero on !_LP64
   1.387 -  __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
   1.388 -  __ BIND(L_bad);
   1.389 -  __ stop(error_message);
   1.390 -  __ BIND(L_ok);
   1.391 -  BLOCK_COMMENT("} verify_argslot");
   1.392 -}
   1.393 -
   1.394 -void MethodHandles::verify_argslots(MacroAssembler* _masm,
   1.395 -                                    RegisterOrConstant arg_slots,
   1.396 -                                    Register arg_slot_base_reg,
   1.397 -                                    Register temp_reg,
   1.398 -                                    Register temp2_reg,
   1.399 -                                    bool negate_argslots,
   1.400 -                                    const char* error_message) {
   1.401 -  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
   1.402 -  Label L_ok, L_bad;
   1.403 -  BLOCK_COMMENT("verify_argslots {");
   1.404 -  if (negate_argslots) {
   1.405 -    if (arg_slots.is_constant()) {
   1.406 -      arg_slots = -1 * arg_slots.as_constant();
   1.407 -    } else {
   1.408 -      __ neg(arg_slots.as_register(), temp_reg);
   1.409 -      arg_slots = temp_reg;
   1.410 -    }
   1.411 -  }
   1.412 -  __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
   1.413 -  __ add(FP, STACK_BIAS, temp2_reg);  // STACK_BIAS is zero on !_LP64
   1.414 -  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
   1.415 -  // Gargs points to the first word so adjust by BytesPerWord
   1.416 -  __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
   1.417 -  __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
   1.418 -  __ BIND(L_bad);
   1.419 -  __ stop(error_message);
   1.420 -  __ BIND(L_ok);
   1.421 -  BLOCK_COMMENT("} verify_argslots");
   1.422 -}
   1.423 -
   1.424 -// Make sure that arg_slots has the same sign as the given direction.
    1.425 -// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
   1.426 -void MethodHandles::verify_stack_move(MacroAssembler* _masm,
   1.427 -                                      RegisterOrConstant arg_slots, int direction) {
   1.428 -  enum { UNREASONABLE_STACK_MOVE = 256 * 4 };  // limit of 255 arguments
   1.429 -  bool allow_zero = arg_slots.is_constant();
   1.430 -  if (direction == 0) { direction = +1; allow_zero = true; }
   1.431 -  assert(stack_move_unit() == -1, "else add extra checks here");
   1.432 -  if (arg_slots.is_register()) {
   1.433 -    Label L_ok, L_bad;
   1.434 -    BLOCK_COMMENT("verify_stack_move {");
   1.435 -    // __ btst(-stack_move_unit() - 1, arg_slots.as_register());  // no need
   1.436 -    // __ br(Assembler::notZero, false, Assembler::pn, L_bad);
   1.437 -    // __ delayed()->nop();
   1.438 -    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
   1.439 -    if (direction > 0) {
   1.440 -      __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad);
   1.441 -      __ delayed()->nop();
   1.442 -      __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
   1.443 -      __ br(Assembler::less, false, Assembler::pn, L_ok);
   1.444 -      __ delayed()->nop();
   1.445 -    } else {
   1.446 -      __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad);
   1.447 -      __ delayed()->nop();
   1.448 -      __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
   1.449 -      __ br(Assembler::greater, false, Assembler::pn, L_ok);
   1.450 -      __ delayed()->nop();
   1.451 -    }
   1.452 -    __ BIND(L_bad);
   1.453 -    if (direction > 0)
   1.454 -      __ stop("assert arg_slots > 0");
   1.455 -    else
   1.456 -      __ stop("assert arg_slots < 0");
   1.457 -    __ BIND(L_ok);
   1.458 -    BLOCK_COMMENT("} verify_stack_move");
   1.459 -  } else {
   1.460 -    intptr_t size = arg_slots.as_constant();
   1.461 -    if (direction < 0)  size = -size;
   1.462 -    assert(size >= 0, "correct direction of constant move");
   1.463 -    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
   1.464 -  }
   1.465 -}
   1.466 -
   1.467  void MethodHandles::verify_klass(MacroAssembler* _masm,
   1.468                                   Register obj_reg, KlassHandle klass,
   1.469                                   Register temp_reg, Register temp2_reg,
   1.470 @@ -485,6 +70,14 @@
   1.471    assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
   1.472           klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
   1.473           "must be one of the SystemDictionaryHandles");
   1.474 +  bool did_save = false;
   1.475 +  if (temp_reg == noreg || temp2_reg == noreg) {
   1.476 +    temp_reg = L1;
   1.477 +    temp2_reg = L2;
   1.478 +    __ save_frame_and_mov(0, obj_reg, L0);
   1.479 +    obj_reg = L0;
   1.480 +    did_save = true;
   1.481 +  }
   1.482    Label L_ok, L_bad;
   1.483    BLOCK_COMMENT("verify_klass {");
   1.484    __ verify_oop(obj_reg);
   1.485 @@ -499,537 +92,415 @@
   1.486    __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
   1.487    __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
   1.488    __ BIND(L_bad);
   1.489 -  __ stop(error_message);
   1.490 +  if (did_save)  __ restore();
   1.491 +  __ STOP(error_message);
   1.492    __ BIND(L_ok);
   1.493 +  if (did_save)  __ restore();
   1.494    BLOCK_COMMENT("} verify_klass");
   1.495  }
   1.496 +
   1.497 +void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
   1.498 +  Label L;
   1.499 +  BLOCK_COMMENT("verify_ref_kind {");
   1.500 +  __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
   1.501 +  __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp);
   1.502 +  __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,  temp);
   1.503 +  __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
   1.504 +  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
   1.505 +    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
   1.506 +    if (ref_kind == JVM_REF_invokeVirtual ||
   1.507 +        ref_kind == JVM_REF_invokeSpecial)
   1.508 +      // could do this for all ref_kinds, but would explode assembly code size
   1.509 +      trace_method_handle(_masm, buf);
   1.510 +    __ STOP(buf);
   1.511 +  }
   1.512 +  BLOCK_COMMENT("} verify_ref_kind");
   1.513 +  __ bind(L);
   1.514 +}
   1.515 +
   1.516  #endif // ASSERT
   1.517  
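verify_ref_kind() above checks that the trailing MemberName really resolves to the kind of call the intrinsic expects. The srl/and3 pair corresponds to the C-level bit extraction sketched below; the shift, mask and JVM_REF_* values are spelled out here only for illustration and should be read from the real headers.

#include <stdio.h>

enum {
  MN_REFERENCE_KIND_SHIFT = 24,     // illustration values; see MethodHandleNatives
  MN_REFERENCE_KIND_MASK  = 0x0F,
  JVM_REF_invokeVirtual   = 5,
  JVM_REF_invokeStatic    = 6,
  JVM_REF_invokeSpecial   = 7,
  JVM_REF_invokeInterface = 9
};

static int extract_ref_kind(unsigned int flags) {
  return (flags >> MN_REFERENCE_KIND_SHIFT) & MN_REFERENCE_KIND_MASK;   // srl + and3
}

int main(void) {
  // pack a reference kind next to some arbitrary other MemberName flag bits
  unsigned int flags =
      ((unsigned int) JVM_REF_invokeVirtual << MN_REFERENCE_KIND_SHIFT) | 0x00010001;
  int ref_kind = extract_ref_kind(flags);
  if (ref_kind != JVM_REF_invokeVirtual) {
    fprintf(stderr, "verify_ref_kind expected %x\n", JVM_REF_invokeVirtual);
    return 1;
  }
  printf("ref_kind = %d (invokeVirtual)\n", ref_kind);
  return 0;
}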
   1.518 -
   1.519 -void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) {
   1.520 +void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
   1.521 +                                            bool for_compiler_entry) {
   1.522    assert(method == G5_method, "interpreter calling convention");
   1.523    __ verify_oop(method);
   1.524 -  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
   1.525 -  if (JvmtiExport::can_post_interpreter_events()) {
   1.526 +
   1.527 +  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
   1.528 +    Label run_compiled_code;
   1.529      // JVMTI events, such as single-stepping, are implemented partly by avoiding running
   1.530      // compiled code in threads for which the event is enabled.  Check here for
   1.531      // interp_only_mode if these events CAN be enabled.
   1.532      __ verify_thread();
   1.533 -    Label skip_compiled_code;
   1.534 -
   1.535      const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
   1.536      __ ld(interp_only, temp);
   1.537 -    __ tst(temp);
   1.538 -    __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
   1.539 -    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
   1.540 -    __ bind(skip_compiled_code);
   1.541 +    __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
   1.542 +    __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
   1.543 +    __ jmp(target, 0);
   1.544 +    __ delayed()->nop();
   1.545 +    __ BIND(run_compiled_code);
   1.546 +    // Note: we could fill some delay slots here, but
   1.547 +    // it doesn't matter, since this is interpreter code.
   1.548    }
   1.549 +
   1.550 +  const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
   1.551 +                                                     methodOopDesc::from_interpreted_offset();
   1.552 +  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
   1.553    __ jmp(target, 0);
   1.554    __ delayed()->nop();
   1.555  }
   1.556  
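A C-level restatement of the control flow jump_from_method_handle() emits (the structs are invented for this sketch; HotSpot keeps the entry points in methodOopDesc and the interp_only_mode flag on JavaThread): a thread running in JVMTI interp-only mode is sent to the interpreter entry even if compiled code exists, otherwise the jump goes through from_compiled_offset or from_interpreted_offset depending on who is calling.

#include <cstdio>

typedef const char* entry_t;   // stand-in: an entry point is just a label string here

struct Method {
  entry_t from_compiled_entry;
  entry_t from_interpreted_entry;
  entry_t interpreter_entry;
};

struct Thread { int interp_only_mode; };

static entry_t select_entry(const Method& m, const Thread& t,
                            bool for_compiler_entry, bool can_post_interpreter_events) {
  if (!for_compiler_entry && can_post_interpreter_events && t.interp_only_mode != 0)
    return m.interpreter_entry;                 // JVMTI single-stepping and friends
  return for_compiler_entry ? m.from_compiled_entry : m.from_interpreted_entry;
}

int main() {
  Method m = { "from_compiled", "from_interpreted", "interpreter" };
  Thread t = { 1 };
  std::printf("%s\n", select_entry(m, t, /*for_compiler_entry*/ false, true));  // interpreter
  std::printf("%s\n", select_entry(m, t, /*for_compiler_entry*/ true,  true));  // from_compiled
  return 0;
}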
   1.557 +void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   1.558 +                                        Register recv, Register method_temp,
   1.559 +                                        Register temp2, Register temp3,
   1.560 +                                        bool for_compiler_entry) {
   1.561 +  BLOCK_COMMENT("jump_to_lambda_form {");
   1.562 +  // This is the initial entry point of a lazy method handle.
   1.563 +  // After type checking, it picks up the invoker from the LambdaForm.
   1.564 +  assert_different_registers(recv, method_temp, temp2, temp3);
   1.565 +  assert(method_temp == G5_method, "required register for loading method");
   1.566 +
   1.567 +  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });
   1.568 +
   1.569 +  // Load the invoker, as MH -> MH.form -> LF.vmentry
   1.570 +  __ verify_oop(recv);
   1.571 +  __ load_heap_oop(Address(recv,        NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())),       method_temp);
   1.572 +  __ verify_oop(method_temp);
   1.573 +  __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
   1.574 +  __ verify_oop(method_temp);
   1.575 +  // the following assumes that a methodOop is normally compressed in the vmtarget field:
   1.576 +  __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())),     method_temp);
   1.577 +  __ verify_oop(method_temp);
   1.578 +
   1.579 +  if (VerifyMethodHandles && !for_compiler_entry) {
   1.580 +    // make sure recv is already on stack
   1.581 +    __ load_sized_value(Address(method_temp, methodOopDesc::size_of_parameters_offset()),
   1.582 +                        temp2,
   1.583 +                        sizeof(u2), /*is_signed*/ false);
   1.584 +    // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
   1.585 +    Label L;
   1.586 +    __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2);
   1.587 +    __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L);
   1.588 +    __ STOP("receiver not on stack");
   1.589 +    __ BIND(L);
   1.590 +  }
   1.591 +
   1.592 +  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
   1.593 +  BLOCK_COMMENT("} jump_to_lambda_form");
   1.594 +}
   1.595 +
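The three load_heap_oop calls above chase MethodHandle.form, then LambdaForm.vmentry, then MemberName.vmtarget before jumping into the resulting method. A hypothetical data-structure sketch of that pointer chase (field names follow the diff; the struct layout is invented):

#include <cstdio>

struct Method       { const char* name; };
struct MemberName   { Method* vmtarget; };
struct LambdaForm   { MemberName* vmentry; };
struct MethodHandle { LambdaForm* form; };

static Method* resolve_invoker(MethodHandle* mh) {
  return mh->form->vmentry->vmtarget;   // three dependent loads, as in the stub
}

int main() {
  Method m = { "LambdaForm$MH/invokeBasic" };
  MemberName mn = { &m };
  LambdaForm lf = { &mn };
  MethodHandle mh = { &lf };
  std::printf("jumping to %s\n", resolve_invoker(&mh)->name);
  return 0;
}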
   1.596  
   1.597  // Code generation
   1.598 -address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
   1.599 -  // I5_savedSP/O5_savedSP: sender SP (must preserve)
   1.600 +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
   1.601 +                                                                vmIntrinsics::ID iid) {
   1.602 +  const bool not_for_compiler_entry = false;  // this is the interpreter entry
   1.603 +  assert(is_signature_polymorphic(iid), "expected invoke iid");
   1.604 +  if (iid == vmIntrinsics::_invokeGeneric ||
   1.605 +      iid == vmIntrinsics::_compiledLambdaForm) {
   1.606 +    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
   1.607 +    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
   1.608 +    // They all allow an appendix argument.
   1.609 +    __ should_not_reach_here();           // empty stubs make SG sick
   1.610 +    return NULL;
   1.611 +  }
   1.612 +
   1.613 +  // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted)
   1.614 +  // G5_method:  methodOop
   1.615    // G4 (Gargs): incoming argument list (must preserve)
   1.616 -  // G5_method:  invoke methodOop
   1.617 -  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
   1.618 -  // O0, O1, O2, O3, O4: garbage temps, blown away
   1.619 -  Register O0_mtype   = O0;
   1.620 -  Register O1_scratch = O1;
   1.621 -  Register O2_scratch = O2;
   1.622 -  Register O3_scratch = O3;
   1.623 -  Register O4_argslot = O4;
   1.624 -  Register O4_argbase = O4;
   1.625 +  // O0: used as temp to hold mh or receiver
   1.626 +  // O1, O4: garbage temps, blown away
   1.627 +  Register O1_scratch    = O1;
   1.628 +  Register O4_param_size = O4;   // size of parameters
   1.629  
   1.630 -  // emit WrongMethodType path first, to enable back-branch from main path
   1.631 -  Label wrong_method_type;
   1.632 -  __ bind(wrong_method_type);
   1.633 -  Label invoke_generic_slow_path;
   1.634 -  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
   1.635 -  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
   1.636 -  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
   1.637 -  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
   1.638 -  __ delayed()->nop();
   1.639 -  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
   1.640 -  __ mov(G3_method_handle, G3_method_handle);  // already in this register
   1.641 -  // O0 will be filled in with JavaThread in stub
   1.642 -  __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch);
   1.643 -  __ delayed()->nop();
   1.644 +  address code_start = __ pc();
   1.645  
   1.646    // here's where control starts out:
   1.647    __ align(CodeEntryAlignment);
   1.648    address entry_point = __ pc();
   1.649  
   1.650 -  // fetch the MethodType from the method handle
   1.651 -  // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
   1.652 -  // This would simplify several touchy bits of code.
   1.653 -  // See 6984712: JSR 292 method handle calls need a clean argument base pointer
   1.654 -  {
   1.655 -    Register tem = G5_method;
   1.656 -    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
   1.657 -      __ ld_ptr(Address(tem, *pchase), O0_mtype);
   1.658 -      tem = O0_mtype;          // in case there is another indirection
   1.659 +  if (VerifyMethodHandles) {
   1.660 +    Label L;
   1.661 +    BLOCK_COMMENT("verify_intrinsic_id {");
   1.662 +    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
   1.663 +    __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L);
   1.664 +    if (iid == vmIntrinsics::_linkToVirtual ||
   1.665 +        iid == vmIntrinsics::_linkToSpecial) {
   1.666 +      // could do this for all kinds, but would explode assembly code size
   1.667 +      trace_method_handle(_masm, "bad methodOop::intrinsic_id");
   1.668      }
   1.669 +    __ STOP("bad methodOop::intrinsic_id");
   1.670 +    __ bind(L);
   1.671 +    BLOCK_COMMENT("} verify_intrinsic_id");
   1.672    }
   1.673  
   1.674 -  // given the MethodType, find out where the MH argument is buried
   1.675 -  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
   1.676 -  __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
   1.677 -  __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase);
   1.678 -  // Note: argument_address uses its input as a scratch register!
   1.679 -  Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize);
   1.680 -  __ ld_ptr(mh_receiver_slot_addr, G3_method_handle);
   1.681 +  // First task:  Find out how big the argument list is.
   1.682 +  Address O4_first_arg_addr;
   1.683 +  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   1.684 +  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   1.685 +  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
   1.686 +    __ load_sized_value(Address(G5_method, methodOopDesc::size_of_parameters_offset()),
   1.687 +                        O4_param_size,
   1.688 +                        sizeof(u2), /*is_signed*/ false);
   1.689 +    // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
   1.690 +    O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1);
   1.691 +  } else {
   1.692 +    DEBUG_ONLY(O4_param_size = noreg);
   1.693 +  }
   1.694  
   1.695 -  trace_method_handle(_masm, "invokeExact");
   1.696 +  Register O0_mh = noreg;
   1.697 +  if (!is_signature_polymorphic_static(iid)) {
   1.698 +    __ ld_ptr(O4_first_arg_addr, O0_mh = O0);
   1.699 +    DEBUG_ONLY(O4_param_size = noreg);
   1.700 +  }
   1.701  
   1.702 -  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
   1.703 +  // O4_first_arg_addr is live!
   1.704  
   1.705 -  // Nobody uses the MH receiver slot after this.  Make sure.
   1.706 -  DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));
   1.707 +  if (TraceMethodHandles) {
   1.708 +    const char* name = vmIntrinsics::name_at(iid);
   1.709 +    if (*name == '_')  name += 1;
   1.710 +    const size_t len = strlen(name) + 50;
   1.711 +    char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
   1.712 +    const char* suffix = "";
   1.713 +    if (vmIntrinsics::method_for(iid) == NULL ||
   1.714 +        !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
   1.715 +      if (is_signature_polymorphic_static(iid))
   1.716 +        suffix = "/static";
   1.717 +      else
   1.718 +        suffix = "/private";
   1.719 +    }
   1.720 +    jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
   1.721 +    if (O0_mh != noreg)
   1.722 +      __ mov(O0_mh, G3_method_handle);  // make stub happy
   1.723 +    trace_method_handle(_masm, qname);
   1.724 +  }
   1.725  
   1.726 -  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   1.727 +  if (iid == vmIntrinsics::_invokeBasic) {
   1.728 +    generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry);
   1.729  
   1.730 -  // for invokeGeneric (only), apply argument and result conversions on the fly
   1.731 -  __ bind(invoke_generic_slow_path);
   1.732 -#ifdef ASSERT
   1.733 -  if (VerifyMethodHandles) {
   1.734 -    Label L;
   1.735 -    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
   1.736 -    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
   1.737 -    __ brx(Assembler::equal, false, Assembler::pt, L);
   1.738 -    __ delayed()->nop();
   1.739 -    __ stop("bad methodOop::intrinsic_id");
   1.740 -    __ bind(L);
   1.741 +  } else {
   1.742 +    // Adjust argument list by popping the trailing MemberName argument.
   1.743 +    Register O0_recv = noreg;
   1.744 +    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
   1.745 +      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
   1.746 +      __ ld_ptr(O4_first_arg_addr, O0_recv = O0);
   1.747 +      DEBUG_ONLY(O4_param_size = noreg);
   1.748 +    }
   1.749 +    Register G5_member = G5_method;  // MemberName ptr; incoming method ptr is dead now
   1.750 +    __ ld_ptr(__ argument_address(constant(0)), G5_member);
   1.751 +    __ add(Gargs, Interpreter::stackElementSize, Gargs);
   1.752 +    generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
   1.753    }
   1.754 -#endif //ASSERT
   1.755  
   1.756 -  // make room on the stack for another pointer:
   1.757 -  insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch);
   1.758 -  // load up an adapter from the calling type (Java weaves this)
   1.759 -  Register O2_form    = O2_scratch;
   1.760 -  Register O3_adapter = O3_scratch;
   1.761 -  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
   1.762 -  __ load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
   1.763 -  __ verify_oop(O3_adapter);
   1.764 -  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
   1.765 -  // As a trusted first argument, pass the type being called, so the adapter knows
   1.766 -  // the actual types of the arguments and return values.
   1.767 -  // (Generic invokers are shared among form-families of method-type.)
   1.768 -  __ st_ptr(O0_mtype,   Address(O4_argbase, 0 * Interpreter::stackElementSize));
   1.769 -  // FIXME: assert that O3_adapter is of the right method-type.
   1.770 -  __ mov(O3_adapter, G3_method_handle);
   1.771 -  trace_method_handle(_masm, "invokeGeneric");
   1.772 -  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
   1.773 +  if (PrintMethodHandleStubs) {
   1.774 +    address code_end = __ pc();
   1.775 +    tty->print_cr("--------");
   1.776 +    tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
   1.777 +    Disassembler::decode(code_start, code_end);
   1.778 +    tty->cr();
   1.779 +  }
   1.780  
   1.781    return entry_point;
   1.782  }
   1.783  
   1.784 -// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
   1.785 -static RegisterOrConstant constant(int value) {
   1.786 -  return RegisterOrConstant(value);
   1.787 -}
   1.788 +void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
   1.789 +                                                    vmIntrinsics::ID iid,
   1.790 +                                                    Register receiver_reg,
   1.791 +                                                    Register member_reg,
   1.792 +                                                    bool for_compiler_entry) {
   1.793 +  assert(is_signature_polymorphic(iid), "expected invoke iid");
   1.794 +  // temps used in this code are not used in *either* compiled or interpreted calling sequences
   1.795 +  Register temp1 = (for_compiler_entry ? G1_scratch : O1);
   1.796 +  Register temp2 = (for_compiler_entry ? G4_scratch : O4);
   1.797 +  Register temp3 = G3_scratch;
   1.798 +  Register temp4 = (for_compiler_entry ? noreg      : O2);
   1.799 +  if (for_compiler_entry) {
   1.800 +    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
   1.801 +    assert_different_registers(temp1,      O0, O1, O2, O3, O4, O5);
   1.802 +    assert_different_registers(temp2,      O0, O1, O2, O3, O4, O5);
   1.803 +    assert_different_registers(temp3,      O0, O1, O2, O3, O4, O5);
   1.804 +    assert_different_registers(temp4,      O0, O1, O2, O3, O4, O5);
   1.805 +  }
   1.806 +  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
   1.807 +  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
   1.808 +  if (!for_compiler_entry)    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
   1.809  
   1.810 -static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) {
   1.811 -  __ ldsw(vmargslot_addr, result);
   1.812 -}
   1.813 +  if (iid == vmIntrinsics::_invokeBasic) {
   1.814 +    // indirect through MH.form.vmentry.vmtarget
   1.815 +    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry);
   1.816  
   1.817 -static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm,
   1.818 -                                                            RegisterOrConstant arg_slots,
   1.819 -                                                            Register temp_reg, Register temp2_reg) {
   1.820 -  // Keep the stack pointer 2*wordSize aligned.
   1.821 -  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
   1.822 -  if (arg_slots.is_constant()) {
   1.823 -    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
   1.824 -    const int masked_offset = round_to(offset, 2 * BytesPerWord);
   1.825 -    const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask;
   1.826 -    assert(masked_offset == masked_offset2, "must agree");
   1.827 -    __ sub(Gargs,        offset, Gargs);
   1.828 -    __ sub(SP,    masked_offset, SP   );
   1.829 -    return offset;
   1.830    } else {
   1.831 -#ifdef ASSERT
   1.832 +    // The method is a member invoker used by direct method handles.
   1.833 +    if (VerifyMethodHandles) {
   1.834 +      // make sure the trailing argument really is a MemberName (caller responsibility)
   1.835 +      verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(),
   1.836 +                   temp1, temp2,
   1.837 +                   "MemberName required for invokeVirtual etc.");
   1.838 +    }
   1.839 +
   1.840 +    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
   1.841 +    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
   1.842 +    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
   1.843 +
   1.844 +    Register temp1_recv_klass = temp1;
   1.845 +    if (iid != vmIntrinsics::_linkToStatic) {
   1.846 +      __ verify_oop(receiver_reg);
   1.847 +      if (iid == vmIntrinsics::_linkToSpecial) {
   1.848 +        // Don't actually load the klass; just null-check the receiver.
   1.849 +        __ null_check(receiver_reg);
   1.850 +      } else {
   1.851 +        // load receiver klass itself
   1.852 +        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
   1.853 +        __ load_klass(receiver_reg, temp1_recv_klass);
   1.854 +        __ verify_oop(temp1_recv_klass);
   1.855 +      }
   1.856 +      BLOCK_COMMENT("check_receiver {");
   1.857 +      // The receiver for the MemberName must be in receiver_reg.
   1.858 +      // Check the receiver against the MemberName.clazz
   1.859 +      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
   1.860 +        // Did not load it above...
   1.861 +        __ load_klass(receiver_reg, temp1_recv_klass);
   1.862 +        __ verify_oop(temp1_recv_klass);
   1.863 +      }
   1.864 +      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
   1.865 +        Label L_ok;
   1.866 +        Register temp2_defc = temp2;
   1.867 +        __ load_heap_oop(member_clazz, temp2_defc);
   1.868 +        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
   1.869 +        __ verify_oop(temp2_defc);
   1.870 +        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
   1.871 +        // If we get here, the type check failed!
   1.872 +        __ STOP("receiver class disagrees with MemberName.clazz");
   1.873 +        __ bind(L_ok);
   1.874 +      }
   1.875 +      BLOCK_COMMENT("} check_receiver");
   1.876 +    }
   1.877 +    if (iid == vmIntrinsics::_linkToSpecial ||
   1.878 +        iid == vmIntrinsics::_linkToStatic) {
   1.879 +      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
   1.880 +    }
   1.881 +
   1.882 +    // Live registers at this point:
   1.883 +    //  member_reg - MemberName that was the trailing argument
   1.884 +    //  temp1_recv_klass - klass of stacked receiver, if needed
   1.885 +    //  O5_savedSP - interpreter linkage (if interpreted)
   1.886 +    //  O0..O7,G1,G4 - compiler arguments (if compiled)
   1.887 +
   1.888 +    bool method_is_live = false;
   1.889 +    switch (iid) {
   1.890 +    case vmIntrinsics::_linkToSpecial:
   1.891 +      if (VerifyMethodHandles) {
   1.892 +        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
   1.893 +      }
   1.894 +      __ load_heap_oop(member_vmtarget, G5_method);
   1.895 +      method_is_live = true;
   1.896 +      break;
   1.897 +
   1.898 +    case vmIntrinsics::_linkToStatic:
   1.899 +      if (VerifyMethodHandles) {
   1.900 +        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
   1.901 +      }
   1.902 +      __ load_heap_oop(member_vmtarget, G5_method);
   1.903 +      method_is_live = true;
   1.904 +      break;
   1.905 +
   1.906 +    case vmIntrinsics::_linkToVirtual:
   1.907      {
   1.908 -      Label L_ok;
   1.909 -      __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok);
   1.910 -      __ stop("negative arg_slots");
   1.911 -      __ bind(L_ok);
   1.912 +      // same as TemplateTable::invokevirtual,
   1.913 +      // minus the CP setup and profiling:
   1.914 +
   1.915 +      if (VerifyMethodHandles) {
   1.916 +        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
   1.917 +      }
   1.918 +
   1.919 +      // pick out the vtable index from the MemberName, and then we can discard it:
   1.920 +      Register temp2_index = temp2;
   1.921 +      __ ld_ptr(member_vmindex, temp2_index);
   1.922 +
   1.923 +      if (VerifyMethodHandles) {
   1.924 +        Label L_index_ok;
   1.925 +        __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok);
   1.926 +        __ STOP("no virtual index");
   1.927 +        __ BIND(L_index_ok);
   1.928 +      }
   1.929 +
   1.930 +      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
   1.931 +      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.
   1.932 +
   1.933 +      // get target methodOop & entry point
   1.934 +      __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
   1.935 +      method_is_live = true;
   1.936 +      break;
   1.937      }
   1.938 -#endif
   1.939 -    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
   1.940 -    __ add( temp_reg,  1*BytesPerWord,       temp2_reg);
   1.941 -    __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg);
   1.942 -    __ sub(Gargs, temp_reg,  Gargs);
   1.943 -    __ sub(SP,    temp2_reg, SP   );
   1.944 -    return temp_reg;
   1.945 +
   1.946 +    case vmIntrinsics::_linkToInterface:
   1.947 +    {
   1.948 +      // same as TemplateTable::invokeinterface
   1.949 +      // (minus the CP setup and profiling, with different argument motion)
   1.950 +      if (VerifyMethodHandles) {
   1.951 +        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
   1.952 +      }
   1.953 +
   1.954 +      Register temp3_intf = temp3;
   1.955 +      __ load_heap_oop(member_clazz, temp3_intf);
   1.956 +      load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
   1.957 +      __ verify_oop(temp3_intf);
   1.958 +
   1.959 +      Register G5_index = G5_method;
   1.960 +      __ ld_ptr(member_vmindex, G5_index);
   1.961 +      if (VerifyMethodHandles) {
   1.962 +        Label L;
   1.963 +        __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L);
   1.964 +        __ STOP("invalid vtable index for MH.invokeInterface");
   1.965 +        __ bind(L);
   1.966 +      }
   1.967 +
   1.968 +      // given intf, index, and recv klass, dispatch to the implementation method
   1.969 +      Label L_no_such_interface;
   1.970 +      Register no_sethi_temp = noreg;
   1.971 +      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
   1.972 +                                 // note: next two args must be the same:
   1.973 +                                 G5_index, G5_method,
   1.974 +                                 temp2, no_sethi_temp,
   1.975 +                                 L_no_such_interface);
   1.976 +
   1.977 +      __ verify_oop(G5_method);
   1.978 +      jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry);
   1.979 +
   1.980 +      __ bind(L_no_such_interface);
   1.981 +      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
   1.982 +      __ jump_to(icce, temp3);
   1.983 +      __ delayed()->nop();
   1.984 +      break;
   1.985 +    }
   1.986 +
   1.987 +    default:
   1.988 +      fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
   1.989 +      break;
   1.990 +    }
   1.991 +
   1.992 +    if (method_is_live) {
   1.993 +      // live at this point:  G5_method, O5_savedSP (if interpreted)
   1.994 +
   1.995 +      // After figuring out which concrete method to call, jump into it.
   1.996 +      // Note that this works in the interpreter with no data motion.
    1.997 +      // But the compiled version will require that the receiver register be shifted out.
   1.998 +      __ verify_oop(G5_method);
   1.999 +      jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry);
  1.1000 +    }
  1.1001    }
  1.1002  }
  1.1003  
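Schematically, the switch in generate_method_handle_dispatch() chooses the target methodOop as follows; the sketch below is only a decision table in C++ (the real linkToVirtual path uses lookup_virtual_method and the linkToInterface path uses lookup_interface_method, as shown in the hunk above):

#include <cstdio>

enum LinkKind { linkToSpecial, linkToStatic, linkToVirtual, linkToInterface };

static const char* dispatch_plan(LinkKind kind) {
  switch (kind) {
  case linkToSpecial:
  case linkToStatic:    return "load MemberName.vmtarget and jump to it (no receiver-based dispatch)";
  case linkToVirtual:   return "load MemberName.vmindex and index the receiver klass's vtable";
  case linkToInterface: return "load MemberName.clazz (the interface) and vmindex, then walk the receiver klass's itable";
  }
  return "unexpected intrinsic";
}

int main() {
  const LinkKind kinds[] = { linkToSpecial, linkToStatic, linkToVirtual, linkToInterface };
  for (int i = 0; i < 4; i++)
    std::printf("%d: %s\n", (int) kinds[i], dispatch_plan(kinds[i]));
  return 0;
}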
  1.1004 -static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm,
  1.1005 -                                                          RegisterOrConstant arg_slots,
  1.1006 -                                                          Register temp_reg, Register temp2_reg) {
  1.1007 -  // Keep the stack pointer 2*wordSize aligned.
  1.1008 -  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  1.1009 -  if (arg_slots.is_constant()) {
  1.1010 -    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
  1.1011 -    const int masked_offset = offset & ~TwoWordAlignmentMask;
  1.1012 -    __ add(Gargs,        offset, Gargs);
  1.1013 -    __ add(SP,    masked_offset, SP   );
  1.1014 -    return offset;
  1.1015 -  } else {
  1.1016 -    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
  1.1017 -    __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg);
  1.1018 -    __ add(Gargs, temp_reg,  Gargs);
  1.1019 -    __ add(SP,    temp2_reg, SP   );
  1.1020 -    return temp_reg;
  1.1021 -  }
  1.1022 -}
  1.1023 -
  1.1024 -// Helper to insert argument slots into the stack.
  1.1025 -// arg_slots must be a multiple of stack_move_unit() and < 0
  1.1026 -// argslot_reg is decremented to point to the new (shifted) location of the argslot
  1.1027 -// But, temp_reg ends up holding the original value of argslot_reg.
  1.1028 -void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
  1.1029 -                                     RegisterOrConstant arg_slots,
  1.1030 -                                     Register argslot_reg,
  1.1031 -                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  1.1032 -  // allow constant zero
  1.1033 -  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
  1.1034 -    return;
  1.1035 -
  1.1036 -  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
  1.1037 -                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
  1.1038 -
  1.1039 -  BLOCK_COMMENT("insert_arg_slots {");
  1.1040 -  if (VerifyMethodHandles)
  1.1041 -    verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  1.1042 -  if (VerifyMethodHandles)
  1.1043 -    verify_stack_move(_masm, arg_slots, -1);
  1.1044 -
  1.1045 -  // Make space on the stack for the inserted argument(s).
  1.1046 -  // Then pull down everything shallower than argslot_reg.
  1.1047 -  // The stacked return address gets pulled down with everything else.
  1.1048 -  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  1.1049 -  //   sp -= size;
  1.1050 -  //   for (temp = sp + size; temp < argslot; temp++)
  1.1051 -  //     temp[-size] = temp[0]
  1.1052 -  //   argslot -= size;
  1.1053 -
  1.1054 -  // offset is temp3_reg in case of arg_slots being a register.
  1.1055 -  RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
  1.1056 -  __ sub(Gargs, offset, temp_reg);  // source pointer for copy
  1.1057 -
  1.1058 -  {
  1.1059 -    Label loop;
  1.1060 -    __ BIND(loop);
  1.1061 -    // pull one word down each time through the loop
  1.1062 -    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
  1.1063 -    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
  1.1064 -    __ add(temp_reg, wordSize, temp_reg);
  1.1065 -    __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop);
  1.1066 -  }
  1.1067 -
  1.1068 -  // Now move the argslot down, to point to the opened-up space.
  1.1069 -  __ add(argslot_reg, offset, argslot_reg);
  1.1070 -  BLOCK_COMMENT("} insert_arg_slots");
  1.1071 -}
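
The copy loop above can be exercised with a tiny word-addressed model. The sketch below is illustrative C++ only (indices stand for word addresses, gargs for the shallow end of the argument area, and the register temps are folded away); it opens two slots at argslot by pulling everything shallower down, just as the pseudo-code describes:

    #include <cstdio>

    int main() {
      long mem[16] = {0};
      int gargs = 8, argslot = 11;                  // shallower args occupy [gargs, argslot)
      mem[8] = 100; mem[9] = 101; mem[10] = 102;    // shallower than the insertion point
      mem[11] = 200; mem[12] = 201;                 // deeper args, untouched by the copy

      const int count = 2;                          // open two slots at argslot
      gargs -= count;                               // the SP/Gargs adjustment, sign folded in
      for (int src = gargs + count; src < argslot; ++src)
        mem[src - count] = mem[src];                // pull one word down per iteration
      argslot -= count;                             // argslot now names the opened gap

      for (int i = gargs; i <= 12; ++i)
        std::printf("mem[%2d] = %3ld%s\n", i, mem[i],
                    (i >= argslot && i < argslot + count) ? "   <-- opened slot" : "");
      return 0;
    }

The opened slots still hold stale copies after the shift, exactly as in the generated code; the caller stores the real inserted argument afterwards.
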
  1.1072 -
  1.1073 -
  1.1074 -// Helper to remove argument slots from the stack.
  1.1075 -// arg_slots must be a multiple of stack_move_unit() and > 0
  1.1076 -void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
  1.1077 -                                     RegisterOrConstant arg_slots,
  1.1078 -                                     Register argslot_reg,
  1.1079 -                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  1.1080 -  // allow constant zero
  1.1081 -  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
  1.1082 -    return;
  1.1083 -  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
  1.1084 -                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
  1.1085 -
  1.1086 -  BLOCK_COMMENT("remove_arg_slots {");
  1.1087 -  if (VerifyMethodHandles)
  1.1088 -    verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false,
  1.1089 -                    "deleted argument(s) must fall within current frame");
  1.1090 -  if (VerifyMethodHandles)
  1.1091 -    verify_stack_move(_masm, arg_slots, +1);
  1.1092 -
  1.1093 -  // Pull up everything shallower than argslot.
  1.1094 -  // Then remove the excess space on the stack.
  1.1095 -  // The stacked return address gets pulled up with everything else.
  1.1096 -  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  1.1097 -  //   for (temp = argslot-1; temp >= sp; --temp)
  1.1098 -  //     temp[size] = temp[0]
  1.1099 -  //   argslot += size;
  1.1100 -  //   sp += size;
  1.1101 -
  1.1102 -  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
  1.1103 -  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  1.1104 -
  1.1105 -  {
  1.1106 -    Label L_loop;
  1.1107 -    __ BIND(L_loop);
  1.1108 -    // pull one word up each time through the loop
  1.1109 -    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
  1.1110 -    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
  1.1111 -    __ sub(temp_reg, wordSize, temp_reg);
  1.1112 -    __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop);
  1.1113 -  }
  1.1114 -
  1.1115 -  // And adjust the argslot address to point at the deletion point.
  1.1116 -  __ add(argslot_reg, offset, argslot_reg);
  1.1117 -
  1.1118 -  // We don't need the offset at this point anymore, just adjust SP and Gargs.
  1.1119 -  (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
  1.1120 -
  1.1121 -  BLOCK_COMMENT("} remove_arg_slots");
  1.1122 -}
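
The deletion direction is the mirror image; an equally small illustrative model (plain C++, not HotSpot code) pushes everything shallower than argslot up over the doomed slots and then releases the shallow end:

    #include <cstdio>

    int main() {
      long mem[16] = {0};
      int gargs = 6, argslot = 9;                   // kept shallow args occupy [gargs, argslot)
      mem[6] = 100; mem[7] = 101; mem[8] = 102;     // to keep
      mem[9] = 300; mem[10] = 301;                  // two slots being deleted at argslot
      mem[11] = 200;                                // deeper arg, untouched

      const int count = 2;
      for (int src = argslot - 1; src >= gargs; --src)
        mem[src + count] = mem[src];                // push one word up per iteration
      argslot += count;                             // deletion point after the shift
      gargs   += count;                             // release the freed space

      for (int i = gargs; i <= 11; ++i)
        std::printf("mem[%2d] = %3ld\n", i, mem[i]);
      return 0;
    }
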
  1.1123 -
  1.1124 -// Helper to copy argument slots to the top of the stack.
  1.1125 -// The sequence starts with argslot_reg and is counted by slot_count
  1.1126 -// slot_count must be a multiple of stack_move_unit() and >= 0
  1.1127 -// This function blows the temps but does not change argslot_reg.
  1.1128 -void MethodHandles::push_arg_slots(MacroAssembler* _masm,
  1.1129 -                                   Register argslot_reg,
  1.1130 -                                   RegisterOrConstant slot_count,
  1.1131 -                                   Register temp_reg, Register temp2_reg) {
  1.1132 -  // allow constant zero
  1.1133 -  if (slot_count.is_constant() && slot_count.as_constant() == 0)
  1.1134 -    return;
  1.1135 -  assert_different_registers(argslot_reg, temp_reg, temp2_reg,
  1.1136 -                             (!slot_count.is_register() ? Gargs : slot_count.as_register()),
  1.1137 -                             SP);
  1.1138 -  assert(Interpreter::stackElementSize == wordSize, "else change this code");
  1.1139 -
  1.1140 -  BLOCK_COMMENT("push_arg_slots {");
  1.1141 -  if (VerifyMethodHandles)
  1.1142 -    verify_stack_move(_masm, slot_count, 0);
  1.1143 -
  1.1144 -  RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg);
  1.1145 -
  1.1146 -  if (slot_count.is_constant()) {
  1.1147 -    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
  1.1148 -      __ ld_ptr(          Address(argslot_reg, i * wordSize), temp_reg);
  1.1149 -      __ st_ptr(temp_reg, Address(Gargs,       i * wordSize));
  1.1150 -    }
  1.1151 -  } else {
  1.1152 -    Label L_plural, L_loop, L_break;
  1.1153 -    // Emit code to dynamically check for the common cases, zero and one slot.
  1.1154 -    __ cmp(slot_count.as_register(), (int32_t) 1);
  1.1155 -    __ br(Assembler::greater, false, Assembler::pn, L_plural);
  1.1156 -    __ delayed()->nop();
  1.1157 -    __ br(Assembler::less, false, Assembler::pn, L_break);
  1.1158 -    __ delayed()->nop();
  1.1159 -    __ ld_ptr(          Address(argslot_reg, 0), temp_reg);
  1.1160 -    __ st_ptr(temp_reg, Address(Gargs,       0));
  1.1161 -    __ ba_short(L_break);
  1.1162 -    __ BIND(L_plural);
  1.1163 -
  1.1164 -    // Loop for 2 or more:
  1.1165 -    //   top = &argslot[slot_count]
  1.1166 -    //   while (top > argslot)  *(--Gargs) = *(--top)
  1.1167 -    Register top_reg = temp_reg;
  1.1168 -    __ add(argslot_reg, offset, top_reg);
  1.1169 -    __ add(Gargs,       offset, Gargs  );  // move back up again so we can go down
  1.1170 -    __ BIND(L_loop);
  1.1171 -    __ sub(top_reg, wordSize, top_reg);
  1.1172 -    __ sub(Gargs,   wordSize, Gargs  );
  1.1173 -    __ ld_ptr(           Address(top_reg, 0), temp2_reg);
  1.1174 -    __ st_ptr(temp2_reg, Address(Gargs,   0));
  1.1175 -    __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
  1.1176 -    __ BIND(L_break);
  1.1177 -  }
  1.1178 -  BLOCK_COMMENT("} push_arg_slots");
  1.1179 -}
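
The push loop follows the same pattern, copying an existing run of slots to a freshly opened shallow end. A compact illustrative model of the general (register-count) case, with the Gargs pre-adjustment folded into the decrementing store (plain C++, not HotSpot code):

    #include <cstdio>

    int main() {
      long mem[16] = {0};
      int gargs = 8, argslot = 10;                  // the run to duplicate starts at argslot
      mem[8]  = 100; mem[9]  = 101;                 // existing shallow args
      mem[10] = 500; mem[11] = 501;                 // the two slots to copy

      const int count = 2;
      int top = argslot + count;                    // top = &argslot[slot_count]
      while (top > argslot)                         // while (top > argslot) *(--Gargs) = *(--top)
        mem[--gargs] = mem[--top];

      for (int i = gargs; i <= 11; ++i)
        std::printf("mem[%2d] = %3ld\n", i, mem[i]);
      return 0;
    }
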
  1.1180 -
  1.1181 -// in-place movement; no change to Gargs
  1.1182 -// blows temp_reg, temp2_reg
  1.1183 -void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
  1.1184 -                                      Register bottom_reg,  // invariant
  1.1185 -                                      Address  top_addr,    // can use temp_reg
  1.1186 -                                      RegisterOrConstant positive_distance_in_slots,  // destroyed if register
  1.1187 -                                      Register temp_reg, Register temp2_reg) {
  1.1188 -  assert_different_registers(bottom_reg,
  1.1189 -                             temp_reg, temp2_reg,
  1.1190 -                             positive_distance_in_slots.register_or_noreg());
  1.1191 -  BLOCK_COMMENT("move_arg_slots_up {");
  1.1192 -  Label L_loop, L_break;
  1.1193 -  Register top_reg = temp_reg;
  1.1194 -  if (!top_addr.is_same_address(Address(top_reg, 0))) {
  1.1195 -    __ add(top_addr, top_reg);
  1.1196 -  }
  1.1197 -  // Detect empty (or broken) loop:
  1.1198 -#ifdef ASSERT
  1.1199 -  if (VerifyMethodHandles) {
  1.1200 -    // Verify that &bottom < &top (non-empty interval)
  1.1201 -    Label L_ok, L_bad;
  1.1202 -    if (positive_distance_in_slots.is_register()) {
  1.1203 -      __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0);
  1.1204 -      __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
  1.1205 -      __ delayed()->nop();
  1.1206 -    }
  1.1207 -    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  1.1208 -    __ BIND(L_bad);
  1.1209 -    __ stop("valid bounds (copy up)");
  1.1210 -    __ BIND(L_ok);
  1.1211 -  }
  1.1212 -#endif
  1.1213 -  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
  1.1214 -  // work top down to bottom, copying contiguous data upwards
  1.1215 -  // In pseudo-code:
  1.1216 -  //   while (--top >= bottom) *(top + distance) = *(top + 0);
  1.1217 -  RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg());
  1.1218 -  __ BIND(L_loop);
  1.1219 -  __ sub(top_reg, wordSize, top_reg);
  1.1220 -  __ ld_ptr(           Address(top_reg, 0     ), temp2_reg);
  1.1221 -  __ st_ptr(temp2_reg, Address(top_reg, offset)           );
  1.1222 -  __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
  1.1223 -  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  1.1224 -  __ BIND(L_break);
  1.1225 -  BLOCK_COMMENT("} move_arg_slots_up");
  1.1226 -}
  1.1227 -
   1.1228 -// in-place movement; no change to SP
  1.1229 -// blows temp_reg, temp2_reg
  1.1230 -void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
  1.1231 -                                        Address  bottom_addr,  // can use temp_reg
  1.1232 -                                        Register top_reg,      // invariant
  1.1233 -                                        RegisterOrConstant negative_distance_in_slots,  // destroyed if register
  1.1234 -                                        Register temp_reg, Register temp2_reg) {
  1.1235 -  assert_different_registers(top_reg,
  1.1236 -                             negative_distance_in_slots.register_or_noreg(),
  1.1237 -                             temp_reg, temp2_reg);
  1.1238 -  BLOCK_COMMENT("move_arg_slots_down {");
  1.1239 -  Label L_loop, L_break;
  1.1240 -  Register bottom_reg = temp_reg;
  1.1241 -  if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) {
  1.1242 -    __ add(bottom_addr, bottom_reg);
  1.1243 -  }
  1.1244 -  // Detect empty (or broken) loop:
  1.1245 -#ifdef ASSERT
  1.1246 -  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
  1.1247 -  if (VerifyMethodHandles) {
  1.1248 -    // Verify that &bottom < &top (non-empty interval)
  1.1249 -    Label L_ok, L_bad;
  1.1250 -    if (negative_distance_in_slots.is_register()) {
  1.1251 -      __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0);
  1.1252 -      __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
  1.1253 -      __ delayed()->nop();
  1.1254 -    }
  1.1255 -    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  1.1256 -    __ BIND(L_bad);
  1.1257 -    __ stop("valid bounds (copy down)");
  1.1258 -    __ BIND(L_ok);
  1.1259 -  }
  1.1260 -#endif
  1.1261 -  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
  1.1262 -  // work bottom up to top, copying contiguous data downwards
  1.1263 -  // In pseudo-code:
  1.1264 -  //   while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++;
  1.1265 -  RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg());
  1.1266 -  __ BIND(L_loop);
  1.1267 -  __ ld_ptr(           Address(bottom_reg, 0     ), temp2_reg);
  1.1268 -  __ st_ptr(temp2_reg, Address(bottom_reg, offset)           );
  1.1269 -  __ add(bottom_reg, wordSize, bottom_reg);
  1.1270 -  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop);
  1.1271 -  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  1.1272 -  __ BIND(L_break);
  1.1273 -  BLOCK_COMMENT("} move_arg_slots_down");
  1.1274 -}
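
Both in-place movers are overlapping copies whose traversal order makes them safe: moving toward higher addresses walks from the top down, moving toward lower addresses walks from the bottom up. A minimal illustrative sketch of the two loops (plain C++, indices standing for word addresses):

    #include <cstdio>

    int main() {
      long up[8]   = { 0, 0, 10, 11, 12, 0, 0, 0 };   // move [2,5) up by 2 slots
      for (int top = 5; --top >= 2; )                 // while (--top >= bottom) *(top+dist) = *top
        up[top + 2] = up[top];

      long down[8] = { 0, 0, 0, 10, 11, 12, 0, 0 };   // move [3,6) down by 2 slots
      for (int bottom = 3; bottom < 6; ++bottom)      // while (bottom < top) *(bottom-dist) = *bottom
        down[bottom - 2] = down[bottom];

      for (int i = 0; i < 8; ++i)
        std::printf("up[%d]=%ld  down[%d]=%ld\n", i, up[i], i, down[i]);
      return 0;
    }
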
  1.1275 -
  1.1276 -// Copy from a field or array element to a stacked argument slot.
  1.1277 -// is_element (ignored) says whether caller is loading an array element instead of an instance field.
  1.1278 -void MethodHandles::move_typed_arg(MacroAssembler* _masm,
  1.1279 -                                   BasicType type, bool is_element,
  1.1280 -                                   Address value_src, Address slot_dest,
  1.1281 -                                   Register temp_reg) {
  1.1282 -  assert(!slot_dest.uses(temp_reg), "must be different register");
  1.1283 -  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
  1.1284 -  if (type == T_OBJECT || type == T_ARRAY) {
  1.1285 -    __ load_heap_oop(value_src, temp_reg);
  1.1286 -    __ verify_oop(temp_reg);
  1.1287 -    __ st_ptr(temp_reg, slot_dest);
  1.1288 -  } else if (type != T_VOID) {
  1.1289 -    int  arg_size      = type2aelembytes(type);
  1.1290 -    bool arg_is_signed = is_signed_subword_type(type);
  1.1291 -    int  slot_size     = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size;  // store int sub-words as int
  1.1292 -    __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed);
  1.1293 -    __ store_sized_value(temp_reg, slot_dest, slot_size              );
  1.1294 -  }
  1.1295 -  BLOCK_COMMENT("} move_typed_arg");
  1.1296 -}
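
For the subword cases the interesting part is the widening: a byte-, short-, or char-sized value is loaded with or without sign extension and then stored as a full int slot. The difference is easy to see with ordinary integer types (illustrative C++ only; two's-complement conversion assumed):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint16_t raw_bits = 0xCAFE;             // the 16 bits as they sit in the field

      int slot_from_short = (int16_t)raw_bits;      // T_SHORT is a signed subword: sign-extend
      int slot_from_char  = (uint16_t)raw_bits;     // T_CHAR is unsigned: zero-extend

      std::printf("short -> int slot = 0x%08X\n", (unsigned) slot_from_short);  // 0xFFFFCAFE
      std::printf("char  -> int slot = 0x%08X\n", (unsigned) slot_from_char);   // 0x0000CAFE
      return 0;
    }
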
  1.1297 -
  1.1298 -// Cf. TemplateInterpreterGenerator::generate_return_entry_for and
  1.1299 -// InterpreterMacroAssembler::save_return_value
  1.1300 -void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
  1.1301 -                                      Address return_slot) {
  1.1302 -  BLOCK_COMMENT("move_return_value {");
  1.1303 -  // Look at the type and pull the value out of the corresponding register.
  1.1304 -  if (type == T_VOID) {
  1.1305 -    // nothing to do
  1.1306 -  } else if (type == T_OBJECT) {
  1.1307 -    __ verify_oop(O0);
  1.1308 -    __ st_ptr(O0, return_slot);
  1.1309 -  } else if (type == T_INT || is_subword_type(type)) {
  1.1310 -    int type_size = type2aelembytes(T_INT);
  1.1311 -    __ store_sized_value(O0, return_slot, type_size);
  1.1312 -  } else if (type == T_LONG) {
  1.1313 -    // store the value by parts
   1.1314 -    // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
  1.1315 -#if !defined(_LP64) && defined(COMPILER2)
  1.1316 -    __ stx(G1, return_slot);
  1.1317 -#else
  1.1318 -  #ifdef _LP64
  1.1319 -    __ stx(O0, return_slot);
  1.1320 -  #else
  1.1321 -    if (return_slot.has_disp()) {
  1.1322 -      // The displacement is a constant
  1.1323 -      __ st(O0, return_slot);
  1.1324 -      __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize));
  1.1325 -    } else {
  1.1326 -      __ std(O0, return_slot);
  1.1327 -    }
  1.1328 -  #endif
  1.1329 -#endif
  1.1330 -  } else if (type == T_FLOAT) {
  1.1331 -    __ stf(FloatRegisterImpl::S, Ftos_f, return_slot);
  1.1332 -  } else if (type == T_DOUBLE) {
  1.1333 -    __ stf(FloatRegisterImpl::D, Ftos_f, return_slot);
  1.1334 -  } else {
  1.1335 -    ShouldNotReachHere();
  1.1336 -  }
  1.1337 -  BLOCK_COMMENT("} move_return_value");
  1.1338 -}
  1.1339 -
  1.1340  #ifndef PRODUCT
  1.1341 -void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
  1.1342 -    RicochetFrame* rf = new RicochetFrame(*fr);
  1.1343 -
  1.1344 -    // ricochet slots (kept in registers for sparc)
  1.1345 -    values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
  1.1346 -    values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
  1.1347 -    values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
  1.1348 -    values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
  1.1349 -    values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
  1.1350 -    values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));
  1.1351 -
  1.1352 -    // relevant ricochet targets (in caller frame)
  1.1353 -    values.describe(-1, rf->saved_args_base(),  err_msg("*saved_args_base for #%d", frame_no));
  1.1354 -    values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()),  err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
  1.1355 -}
  1.1356 -#endif // ASSERT
  1.1357 -
  1.1358 -#ifndef PRODUCT
  1.1359 -extern "C" void print_method_handle(oop mh);
  1.1360  void trace_method_handle_stub(const char* adaptername,
  1.1361                                oopDesc* mh,
  1.1362                                intptr_t* saved_sp,
  1.1363                                intptr_t* args,
  1.1364                                intptr_t* tracing_fp) {
  1.1365 -  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
  1.1366 -
  1.1367 -  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
  1.1368 +  bool has_mh = (strstr(adaptername, "/static") == NULL &&
  1.1369 +                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
  1.1370 +  const char* mh_reg_name = has_mh ? "G3_mh" : "G3";
  1.1371 +  tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
  1.1372 +                adaptername, mh_reg_name,
  1.1373 +                (intptr_t) mh, saved_sp, args);
  1.1374  
  1.1375    if (Verbose) {
  1.1376      // dumping last frame with frame::describe
  1.1377 @@ -1090,6 +561,7 @@
  1.1378  
  1.1379      // mark saved_sp, if seems valid (may not be valid for some adapters)
  1.1380      intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
  1.1381 +    const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP);
  1.1382      if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
  1.1383        values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
  1.1384      }
  1.1385 @@ -1097,10 +569,13 @@
  1.1386      // Note: the unextended_sp may not be correct
  1.1387      tty->print_cr("  stack layout:");
  1.1388      values.print(p);
  1.1389 -  }
  1.1390 -
  1.1391 -  if (has_mh) {
  1.1392 -    print_method_handle(mh);
  1.1393 +    if (has_mh && mh->is_oop()) {
  1.1394 +      mh->print();
  1.1395 +      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
  1.1396 +        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
  1.1397 +          java_lang_invoke_MethodHandle::form(mh)->print();
  1.1398 +      }
  1.1399 +    }
  1.1400    }
  1.1401  }
  1.1402  
  1.1403 @@ -1143,1260 +618,3 @@
  1.1404    BLOCK_COMMENT("} trace_method_handle");
  1.1405  }
  1.1406  #endif // PRODUCT
  1.1407 -
  1.1408 -// which conversion op types are implemented here?
  1.1409 -int MethodHandles::adapter_conversion_ops_supported_mask() {
  1.1410 -  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
  1.1411 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
  1.1412 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
  1.1413 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
  1.1414 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
  1.1415 -          // OP_PRIM_TO_REF is below...
  1.1416 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
  1.1417 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
  1.1418 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
  1.1419 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
  1.1420 -          // OP_COLLECT_ARGS is below...
  1.1421 -         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
  1.1422 -         |(
  1.1423 -           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
  1.1424 -           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
  1.1425 -           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
  1.1426 -           |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
  1.1427 -           )
  1.1428 -          )
  1.1429 -         );
  1.1430 -}
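
The mask above is an ordinary bit set keyed by conversion-op number, and a caller can test membership with a shift and an AND. A tiny illustrative sketch of the pattern (the op numbers below are made up for the example, not the real java_lang_invoke_AdapterMethodHandle constants):

    #include <cstdio>

    enum { OP_RETYPE_ONLY = 0, OP_CHECK_CAST = 2, OP_SPREAD_ARGS = 9 };  // hypothetical numbering

    static int supported_mask() {
      return (1 << OP_RETYPE_ONLY)
           | (1 << OP_CHECK_CAST)
           | (1 << OP_SPREAD_ARGS);
    }

    static bool op_supported(int op) {
      return ((supported_mask() >> op) & 1) != 0;
    }

    int main() {
      std::printf("OP_CHECK_CAST supported: %d\n", op_supported(OP_CHECK_CAST));  // 1
      std::printf("op 5 supported:          %d\n", op_supported(5));              // 0
      return 0;
    }
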
  1.1431 -
  1.1432 -//------------------------------------------------------------------------------
  1.1433 -// MethodHandles::generate_method_handle_stub
  1.1434 -//
  1.1435 -// Generate an "entry" field for a method handle.
  1.1436 -// This determines how the method handle will respond to calls.
  1.1437 -void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  1.1438 -  MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
  1.1439 -
  1.1440 -  // Here is the register state during an interpreted call,
  1.1441 -  // as set up by generate_method_handle_interpreter_entry():
  1.1442 -  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  1.1443 -  // - G3: receiver method handle
  1.1444 -  // - O5_savedSP: sender SP (must preserve)
  1.1445 -
  1.1446 -  const Register O0_scratch = O0;
  1.1447 -  const Register O1_scratch = O1;
  1.1448 -  const Register O2_scratch = O2;
  1.1449 -  const Register O3_scratch = O3;
  1.1450 -  const Register O4_scratch = O4;
  1.1451 -  const Register G5_scratch = G5;
  1.1452 -
  1.1453 -  // Often used names:
  1.1454 -  const Register O0_argslot = O0;
  1.1455 -
  1.1456 -  // Argument registers for _raise_exception:
  1.1457 -  const Register O0_code     = O0;
  1.1458 -  const Register O1_actual   = O1;
  1.1459 -  const Register O2_required = O2;
  1.1460 -
  1.1461 -  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
  1.1462 -
  1.1463 -  // Some handy addresses:
  1.1464 -  Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
  1.1465 -
  1.1466 -  Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());
  1.1467 -
  1.1468 -  Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes());
  1.1469 -  Address G3_bmh_argument(  G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes());
  1.1470 -
  1.1471 -  Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes());
  1.1472 -  Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
  1.1473 -  Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
  1.1474 -
  1.1475 -  const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
  1.1476 -
  1.1477 -  if (have_entry(ek)) {
  1.1478 -    __ nop();  // empty stubs make SG sick
  1.1479 -    return;
  1.1480 -  }
  1.1481 -
  1.1482 -  address interp_entry = __ pc();
  1.1483 -
  1.1484 -  trace_method_handle(_masm, entry_name(ek));
  1.1485 -
  1.1486 -  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
  1.1487 -
  1.1488 -  switch ((int) ek) {
  1.1489 -  case _raise_exception:
  1.1490 -    {
  1.1491 -      // Not a real MH entry, but rather shared code for raising an
   1.1492 -      // exception.  For sharing purposes the arguments are passed in registers
   1.1493 -      // and then placed in the interpreter calling convention here.
  1.1494 -      assert(raise_exception_method(), "must be set");
  1.1495 -      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
  1.1496 -
  1.1497 -      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
  1.1498 -      __ ld_ptr(Address(G5_method, 0), G5_method);
  1.1499 -
  1.1500 -      const int jobject_oop_offset = 0;
  1.1501 -      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
  1.1502 -
  1.1503 -      adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg);
  1.1504 -
  1.1505 -      __ st    (O0_code,     __ argument_address(constant(2), noreg, 0));
  1.1506 -      __ st_ptr(O1_actual,   __ argument_address(constant(1), noreg, 0));
  1.1507 -      __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0));
  1.1508 -      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
  1.1509 -    }
  1.1510 -    break;
  1.1511 -
  1.1512 -  case _invokestatic_mh:
  1.1513 -  case _invokespecial_mh:
  1.1514 -    {
  1.1515 -      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
  1.1516 -      // Same as TemplateTable::invokestatic or invokespecial,
  1.1517 -      // minus the CP setup and profiling:
  1.1518 -      if (ek == _invokespecial_mh) {
  1.1519 -        // Must load & check the first argument before entering the target method.
  1.1520 -        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
  1.1521 -        __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
  1.1522 -        __ null_check(G3_method_handle);
  1.1523 -        __ verify_oop(G3_method_handle);
  1.1524 -      }
  1.1525 -      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
  1.1526 -    }
  1.1527 -    break;
  1.1528 -
  1.1529 -  case _invokevirtual_mh:
  1.1530 -    {
  1.1531 -      // Same as TemplateTable::invokevirtual,
  1.1532 -      // minus the CP setup and profiling:
  1.1533 -
  1.1534 -      // Pick out the vtable index and receiver offset from the MH,
  1.1535 -      // and then we can discard it:
  1.1536 -      Register O2_index = O2_scratch;
  1.1537 -      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
  1.1538 -      __ ldsw(G3_dmh_vmindex, O2_index);
  1.1539 -      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
  1.1540 -      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
  1.1541 -      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
  1.1542 -
  1.1543 -      // Get receiver klass:
  1.1544 -      Register O0_klass = O0_argslot;
  1.1545 -      __ load_klass(G3_method_handle, O0_klass);
  1.1546 -      __ verify_oop(O0_klass);
  1.1547 -
  1.1548 -      // Get target methodOop & entry point:
  1.1549 -      const int base = instanceKlass::vtable_start_offset() * wordSize;
  1.1550 -      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  1.1551 -
  1.1552 -      __ sll_ptr(O2_index, LogBytesPerWord, O2_index);
  1.1553 -      __ add(O0_klass, O2_index, O0_klass);
  1.1554 -      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
  1.1555 -      __ ld_ptr(vtable_entry_addr, G5_method);
  1.1556 -
  1.1557 -      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
  1.1558 -    }
  1.1559 -    break;
  1.1560 -
  1.1561 -  case _invokeinterface_mh:
  1.1562 -    {
  1.1563 -      // Same as TemplateTable::invokeinterface,
  1.1564 -      // minus the CP setup and profiling:
  1.1565 -      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
  1.1566 -      Register O1_intf  = O1_scratch;
  1.1567 -      Register G5_index = G5_scratch;
  1.1568 -      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
  1.1569 -      __ ldsw(G3_dmh_vmindex, G5_index);
  1.1570 -      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
  1.1571 -      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
  1.1572 -
  1.1573 -      // Get receiver klass:
  1.1574 -      Register O0_klass = O0_argslot;
  1.1575 -      __ load_klass(G3_method_handle, O0_klass);
  1.1576 -      __ verify_oop(O0_klass);
  1.1577 -
  1.1578 -      // Get interface:
  1.1579 -      Label no_such_interface;
  1.1580 -      __ verify_oop(O1_intf);
  1.1581 -      __ lookup_interface_method(O0_klass, O1_intf,
  1.1582 -                                 // Note: next two args must be the same:
  1.1583 -                                 G5_index, G5_method,
  1.1584 -                                 O2_scratch,
  1.1585 -                                 O3_scratch,
  1.1586 -                                 no_such_interface);
  1.1587 -
  1.1588 -      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
  1.1589 -
  1.1590 -      __ bind(no_such_interface);
  1.1591 -      // Throw an exception.
  1.1592 -      // For historical reasons, it will be IncompatibleClassChangeError.
  1.1593 -      __ unimplemented("not tested yet");
  1.1594 -      __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required);  // required interface
  1.1595 -      __ mov(   O0_klass,                             O1_actual);    // bad receiver
  1.1596 -      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
  1.1597 -      __ delayed()->mov(Bytecodes::_invokeinterface,  O0_code);      // who is complaining?
  1.1598 -    }
  1.1599 -    break;
  1.1600 -
  1.1601 -  case _bound_ref_mh:
  1.1602 -  case _bound_int_mh:
  1.1603 -  case _bound_long_mh:
  1.1604 -  case _bound_ref_direct_mh:
  1.1605 -  case _bound_int_direct_mh:
  1.1606 -  case _bound_long_direct_mh:
  1.1607 -    {
  1.1608 -      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
  1.1609 -      BasicType arg_type  = ek_bound_mh_arg_type(ek);
  1.1610 -      int       arg_slots = type2size[arg_type];
  1.1611 -
  1.1612 -      // Make room for the new argument:
  1.1613 -      load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot);
  1.1614 -      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.1615 -
  1.1616 -      insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.1617 -
  1.1618 -      // Store bound argument into the new stack slot:
  1.1619 -      __ load_heap_oop(G3_bmh_argument, O1_scratch);
  1.1620 -      if (arg_type == T_OBJECT) {
  1.1621 -        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
  1.1622 -      } else {
  1.1623 -        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
  1.1624 -        move_typed_arg(_masm, arg_type, false,
  1.1625 -                       prim_value_addr,
  1.1626 -                       Address(O0_argslot, 0),
  1.1627 -                      O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
  1.1628 -      }
  1.1629 -
  1.1630 -      if (direct_to_method) {
  1.1631 -        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
  1.1632 -        jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
  1.1633 -      } else {
  1.1634 -        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
  1.1635 -        __ verify_oop(G3_method_handle);
  1.1636 -        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1637 -      }
  1.1638 -    }
  1.1639 -    break;
  1.1640 -
  1.1641 -  case _adapter_opt_profiling:
  1.1642 -    if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
  1.1643 -      Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
  1.1644 -      __ ld(G3_mh_vmcount, O1_scratch);
  1.1645 -      __ add(O1_scratch, 1, O1_scratch);
  1.1646 -      __ st(O1_scratch, G3_mh_vmcount);
  1.1647 -    }
  1.1648 -    // fall through
  1.1649 -
  1.1650 -  case _adapter_retype_only:
  1.1651 -  case _adapter_retype_raw:
  1.1652 -    // Immediately jump to the next MH layer:
  1.1653 -    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1654 -    __ verify_oop(G3_method_handle);
  1.1655 -    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1656 -    // This is OK when all parameter types widen.
  1.1657 -    // It is also OK when a return type narrows.
  1.1658 -    break;
  1.1659 -
  1.1660 -  case _adapter_check_cast:
  1.1661 -    {
  1.1662 -      // Check a reference argument before jumping to the next layer of MH:
  1.1663 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1664 -      Address vmarg = __ argument_address(O0_argslot, O0_argslot);
  1.1665 -
  1.1666 -      // What class are we casting to?
  1.1667 -      Register O1_klass = O1_scratch;  // Interesting AMH data.
  1.1668 -      __ load_heap_oop(G3_amh_argument, O1_klass);  // This is a Class object!
  1.1669 -      load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch);
  1.1670 -
  1.1671 -      Label L_done;
  1.1672 -      __ ld_ptr(vmarg, O2_scratch);
  1.1673 -      __ br_null_short(O2_scratch, Assembler::pn, L_done);  // No cast if null.
  1.1674 -      __ load_klass(O2_scratch, O2_scratch);
  1.1675 -
  1.1676 -      // Live at this point:
  1.1677 -      // - O0_argslot      :  argslot index in vmarg; may be required in the failing path
  1.1678 -      // - O1_klass        :  klass required by the target method
  1.1679 -      // - O2_scratch      :  argument klass to test
  1.1680 -      // - G3_method_handle:  adapter method handle
  1.1681 -      __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done);
  1.1682 -
  1.1683 -      // If we get here, the type check failed!
  1.1684 -      __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
  1.1685 -      __ ld_ptr(       vmarg,                  O1_actual);    // bad object
  1.1686 -      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
  1.1687 -      __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?
  1.1688 -
  1.1689 -      __ BIND(L_done);
  1.1690 -      // Get the new MH:
  1.1691 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1692 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1693 -    }
  1.1694 -    break;
  1.1695 -
  1.1696 -  case _adapter_prim_to_prim:
  1.1697 -  case _adapter_ref_to_prim:
  1.1698 -    // Handled completely by optimized cases.
  1.1699 -    __ stop("init_AdapterMethodHandle should not issue this");
  1.1700 -    break;
  1.1701 -
  1.1702 -  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
  1.1703 -//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  1.1704 -  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  1.1705 -  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
  1.1706 -    {
  1.1707 -      // Perform an in-place conversion to int or an int subword.
  1.1708 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1709 -      Address value;
  1.1710 -      Address vmarg;
  1.1711 -      bool value_left_justified = false;
  1.1712 -
  1.1713 -      switch (ek) {
  1.1714 -      case _adapter_opt_i2i:
  1.1715 -        value = vmarg = __ argument_address(O0_argslot, O0_argslot);
  1.1716 -        break;
  1.1717 -      case _adapter_opt_l2i:
  1.1718 -        {
  1.1719 -          // just delete the extra slot
  1.1720 -#ifdef _LP64
  1.1721 -          // In V9, longs are given 2 64-bit slots in the interpreter, but the
  1.1722 -          // data is passed in only 1 slot.
  1.1723 -          // Keep the second slot.
  1.1724 -          __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot);
  1.1725 -          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.1726 -          value = Address(O0_argslot, 4);  // Get least-significant 32-bit of 64-bit value.
  1.1727 -          vmarg = Address(O0_argslot, Interpreter::stackElementSize);
  1.1728 -#else
  1.1729 -          // Keep the first slot.
  1.1730 -          __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.1731 -          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.1732 -          value = Address(O0_argslot, 0);
  1.1733 -          vmarg = value;
  1.1734 -#endif
  1.1735 -        }
  1.1736 -        break;
  1.1737 -      case _adapter_opt_unboxi:
  1.1738 -        {
  1.1739 -          vmarg = __ argument_address(O0_argslot, O0_argslot);
  1.1740 -          // Load the value up from the heap.
  1.1741 -          __ ld_ptr(vmarg, O1_scratch);
  1.1742 -          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
  1.1743 -#ifdef ASSERT
  1.1744 -          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  1.1745 -            if (is_subword_type(BasicType(bt)))
  1.1746 -              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
  1.1747 -          }
  1.1748 -#endif
  1.1749 -          __ null_check(O1_scratch, value_offset);
  1.1750 -          value = Address(O1_scratch, value_offset);
  1.1751 -#ifdef _BIG_ENDIAN
  1.1752 -          // Values stored in objects are packed.
  1.1753 -          value_left_justified = true;
  1.1754 -#endif
  1.1755 -        }
  1.1756 -        break;
  1.1757 -      default:
  1.1758 -        ShouldNotReachHere();
  1.1759 -      }
  1.1760 -
  1.1761 -      // This check is required on _BIG_ENDIAN
  1.1762 -      Register G5_vminfo = G5_scratch;
  1.1763 -      __ ldsw(G3_amh_conversion, G5_vminfo);
  1.1764 -      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
  1.1765 -
  1.1766 -      // Original 32-bit vmdata word must be of this form:
  1.1767 -      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
  1.1768 -      __ lduw(value, O1_scratch);
  1.1769 -      if (!value_left_justified)
  1.1770 -        __ sll(O1_scratch, G5_vminfo, O1_scratch);
  1.1771 -      Label zero_extend, done;
  1.1772 -      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
  1.1773 -      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
  1.1774 -      __ delayed()->nop();
  1.1775 -
  1.1776 -      // this path is taken for int->byte, int->short
  1.1777 -      __ sra(O1_scratch, G5_vminfo, O1_scratch);
  1.1778 -      __ ba_short(done);
  1.1779 -
  1.1780 -      __ bind(zero_extend);
  1.1781 -      // this is taken for int->char
  1.1782 -      __ srl(O1_scratch, G5_vminfo, O1_scratch);
  1.1783 -
  1.1784 -      __ bind(done);
  1.1785 -      __ st(O1_scratch, vmarg);
  1.1786 -
  1.1787 -      // Get the new MH:
  1.1788 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1789 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1790 -    }
  1.1791 -    break;
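
The in-place int narrowing above is the classic two-shift trick: shift the value left so that only the target-width bits remain at the top, then shift back right, arithmetically for the signed subwords (byte, short) or logically for char, using the shift count read out of the conversion's vminfo. A standalone illustration (plain C++, not HotSpot code; arithmetic right shift and two's complement assumed, as on SPARC):

    #include <cstdio>
    #include <cstdint>

    // Narrow an int in place: left-shift by (32 - target width), then shift back right.
    static int32_t narrow(int32_t v, int shift, bool is_signed) {
      uint32_t left = (uint32_t)v << shift;                 // sll
      return is_signed ? (int32_t)left >> shift             // sra: sign-extend back down
                       : (int32_t)(left >> shift);          // srl: zero-extend back down
    }

    int main() {
      int32_t v = 0x12345EF0;
      std::printf("int->byte  : %d\n",     narrow(v, 24, true));              // -16   (low 8 bits, signed)
      std::printf("int->short : %d\n",     narrow(v, 16, true));              // 24304 (low 16 bits, signed)
      std::printf("int->char  : 0x%04X\n", (unsigned) narrow(v, 16, false));  // 0x5EF0 (low 16 bits, unsigned)
      return 0;
    }
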
  1.1792 -
  1.1793 -  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  1.1794 -  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
  1.1795 -    {
  1.1796 -      // Perform an in-place int-to-long or ref-to-long conversion.
  1.1797 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1798 -
   1.1799 -      // On a big-endian machine we duplicate the slot and store the MSW
  1.1800 -      // in the first slot.
  1.1801 -      __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot);
  1.1802 -
  1.1803 -      insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.1804 -
  1.1805 -      Address arg_lsw(O0_argslot, 0);
  1.1806 -      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
  1.1807 -
  1.1808 -      switch (ek) {
  1.1809 -      case _adapter_opt_i2l:
  1.1810 -        {
  1.1811 -#ifdef _LP64
  1.1812 -          __ ldsw(arg_lsw, O2_scratch);                 // Load LSW sign-extended
  1.1813 -#else
  1.1814 -          __ ldsw(arg_lsw, O3_scratch);                 // Load LSW sign-extended
  1.1815 -          __ srlx(O3_scratch, BitsPerInt, O2_scratch);  // Move MSW value to lower 32-bits for std
  1.1816 -#endif
  1.1817 -          __ st_long(O2_scratch, arg_msw);              // Uses O2/O3 on !_LP64
  1.1818 -        }
  1.1819 -        break;
  1.1820 -      case _adapter_opt_unboxl:
  1.1821 -        {
  1.1822 -          // Load the value up from the heap.
  1.1823 -          __ ld_ptr(arg_lsw, O1_scratch);
  1.1824 -          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
  1.1825 -          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
  1.1826 -          __ null_check(O1_scratch, value_offset);
  1.1827 -          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
  1.1828 -          __ st_long(O2_scratch, arg_msw);
  1.1829 -        }
  1.1830 -        break;
  1.1831 -      default:
  1.1832 -        ShouldNotReachHere();
  1.1833 -      }
  1.1834 -
  1.1835 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1836 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1837 -    }
  1.1838 -    break;
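
The widening direction above sign-extends the incoming int and stores the 64-bit result across the two (big-endian) interpreter slots, most significant word in the shallower slot. An illustrative sketch of the value manipulation only (plain C++):

    #include <cstdio>
    #include <cstdint>

    int main() {
      int32_t  int_arg = -7;                               // incoming int argument slot
      int64_t  widened = (int64_t) int_arg;                // ldsw: load sign-extended

      uint32_t msw = (uint32_t)((uint64_t)widened >> 32);  // goes in the first (shallower) slot
      uint32_t lsw = (uint32_t)((uint64_t)widened & 0xFFFFFFFFu);

      std::printf("msw=0x%08X  lsw=0x%08X\n", msw, lsw);   // 0xFFFFFFFF  0xFFFFFFF9
      return 0;
    }
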
  1.1839 -
  1.1840 -  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  1.1841 -  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
  1.1842 -    {
  1.1843 -      // perform an in-place floating primitive conversion
  1.1844 -      __ unimplemented(entry_name(ek));
  1.1845 -    }
  1.1846 -    break;
  1.1847 -
  1.1848 -  case _adapter_prim_to_ref:
  1.1849 -    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
  1.1850 -    break;
  1.1851 -
  1.1852 -  case _adapter_swap_args:
  1.1853 -  case _adapter_rot_args:
  1.1854 -    // handled completely by optimized cases
  1.1855 -    __ stop("init_AdapterMethodHandle should not issue this");
  1.1856 -    break;
  1.1857 -
  1.1858 -  case _adapter_opt_swap_1:
  1.1859 -  case _adapter_opt_swap_2:
  1.1860 -  case _adapter_opt_rot_1_up:
  1.1861 -  case _adapter_opt_rot_1_down:
  1.1862 -  case _adapter_opt_rot_2_up:
  1.1863 -  case _adapter_opt_rot_2_down:
  1.1864 -    {
  1.1865 -      int swap_slots = ek_adapter_opt_swap_slots(ek);
  1.1866 -      int rotate     = ek_adapter_opt_swap_mode(ek);
  1.1867 -
  1.1868 -      // 'argslot' is the position of the first argument to swap.
  1.1869 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1870 -      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.1871 -      if (VerifyMethodHandles)
  1.1872 -        verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame");
  1.1873 -
  1.1874 -      // 'vminfo' is the second.
  1.1875 -      Register O1_destslot = O1_scratch;
  1.1876 -      load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot);
  1.1877 -      __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot);
  1.1878 -      if (VerifyMethodHandles)
  1.1879 -        verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame");
  1.1880 -
  1.1881 -      assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
  1.1882 -      if (!rotate) {
  1.1883 -        // simple swap
  1.1884 -        for (int i = 0; i < swap_slots; i++) {
  1.1885 -          __ ld_ptr(            Address(O0_argslot,  i * wordSize), O2_scratch);
  1.1886 -          __ ld_ptr(            Address(O1_destslot, i * wordSize), O3_scratch);
  1.1887 -          __ st_ptr(O3_scratch, Address(O0_argslot,  i * wordSize));
  1.1888 -          __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize));
  1.1889 -        }
  1.1890 -      } else {
   1.1891 -        // A rotate is actually a pair of moves, with an "odd slot" (or pair)
  1.1892 -        // changing place with a series of other slots.
  1.1893 -        // First, push the "odd slot", which is going to get overwritten
  1.1894 -        switch (swap_slots) {
  1.1895 -        case 2 :  __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru
  1.1896 -        case 1 :  __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break;
  1.1897 -        default:  ShouldNotReachHere();
  1.1898 -        }
  1.1899 -        if (rotate > 0) {
  1.1900 -          // Here is rotate > 0:
  1.1901 -          // (low mem)                                          (high mem)
  1.1902 -          //     | dest:     more_slots...     | arg: odd_slot :arg+1 |
  1.1903 -          // =>
  1.1904 -          //     | dest: odd_slot | dest+1: more_slots...      :arg+1 |
  1.1905 -          // work argslot down to destslot, copying contiguous data upwards
  1.1906 -          // pseudo-code:
  1.1907 -          //   argslot  = src_addr - swap_bytes
  1.1908 -          //   destslot = dest_addr
  1.1909 -          //   while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--;
  1.1910 -          move_arg_slots_up(_masm,
  1.1911 -                            O1_destslot,
  1.1912 -                            Address(O0_argslot, 0),
  1.1913 -                            swap_slots,
  1.1914 -                            O0_argslot, O2_scratch);
  1.1915 -        } else {
  1.1916 -          // Here is the other direction, rotate < 0:
  1.1917 -          // (low mem)                                          (high mem)
  1.1918 -          //     | arg: odd_slot | arg+1: more_slots...       :dest+1 |
  1.1919 -          // =>
  1.1920 -          //     | arg:    more_slots...     | dest: odd_slot :dest+1 |
  1.1921 -          // work argslot up to destslot, copying contiguous data downwards
  1.1922 -          // pseudo-code:
  1.1923 -          //   argslot  = src_addr + swap_bytes
  1.1924 -          //   destslot = dest_addr
  1.1925 -          //   while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++;
  1.1926 -          // dest_slot denotes an exclusive upper limit
  1.1927 -          int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
  1.1928 -          if (limit_bias != 0)
  1.1929 -            __ add(O1_destslot, - limit_bias * wordSize, O1_destslot);
  1.1930 -          move_arg_slots_down(_masm,
  1.1931 -                              Address(O0_argslot, swap_slots * wordSize),
  1.1932 -                              O1_destslot,
  1.1933 -                              -swap_slots,
  1.1934 -                              O0_argslot, O2_scratch);
  1.1935 -
  1.1936 -          __ sub(O1_destslot, swap_slots * wordSize, O1_destslot);
  1.1937 -        }
  1.1938 -        // pop the original first chunk into the destination slot, now free
  1.1939 -        switch (swap_slots) {
  1.1940 -        case 2 :  __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru
  1.1941 -        case 1 :  __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break;
  1.1942 -        default:  ShouldNotReachHere();
  1.1943 -        }
  1.1944 -      }
  1.1945 -
  1.1946 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1947 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1948 -    }
  1.1949 -    break;
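
The rotate cases amount to save / shift / restore: the odd slot is read into scratch registers, the remaining run is moved one position with the in-place movers shown earlier, and the saved slot is dropped into the hole that opens at the destination. A one-slot rotate in miniature (illustrative C++ only; the layout mirrors the rotate > 0 diagram above):

    #include <cstdio>

    int main() {
      long slots[4] = { 11, 12, 13, 99 };          // | dest: more_slots... | arg: odd_slot |
      int dest = 0, arg = 3;

      long odd = slots[arg];                       // push the odd slot into a scratch
      for (int i = arg - 1; i >= dest; --i)        // shift the run up by one position
        slots[i + 1] = slots[i];
      slots[dest] = odd;                           // pop it into the freed destination slot

      for (int i = 0; i < 4; ++i)
        std::printf("%ld ", slots[i]);             // prints: 99 11 12 13
      std::printf("\n");
      return 0;
    }
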
  1.1950 -
  1.1951 -  case _adapter_dup_args:
  1.1952 -    {
  1.1953 -      // 'argslot' is the position of the first argument to duplicate.
  1.1954 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1955 -      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.1956 -
   1.1957 -      // 'stack_move' is the (negative) number of words to duplicate.
  1.1958 -      Register O1_stack_move = O1_scratch;
  1.1959 -      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
  1.1960 -
  1.1961 -      if (VerifyMethodHandles) {
  1.1962 -        verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true,
  1.1963 -                        "copied argument(s) must fall within current frame");
  1.1964 -      }
  1.1965 -
  1.1966 -      // insert location is always the bottom of the argument list:
  1.1967 -      __ neg(O1_stack_move);
  1.1968 -      push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch);
  1.1969 -
  1.1970 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1971 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1972 -    }
  1.1973 -    break;
  1.1974 -
  1.1975 -  case _adapter_drop_args:
  1.1976 -    {
  1.1977 -      // 'argslot' is the position of the first argument to nuke.
  1.1978 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.1979 -      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.1980 -
   1.1981 -      // 'stack_move' is the number of words to drop.
  1.1982 -      Register O1_stack_move = O1_scratch;
  1.1983 -      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
  1.1984 -
  1.1985 -      remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch);
  1.1986 -
  1.1987 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.1988 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.1989 -    }
  1.1990 -    break;
  1.1991 -
  1.1992 -  case _adapter_collect_args:
  1.1993 -  case _adapter_fold_args:
  1.1994 -  case _adapter_spread_args:
  1.1995 -    // Handled completely by optimized cases.
  1.1996 -    __ stop("init_AdapterMethodHandle should not issue this");
  1.1997 -    break;
  1.1998 -
  1.1999 -  case _adapter_opt_collect_ref:
  1.2000 -  case _adapter_opt_collect_int:
  1.2001 -  case _adapter_opt_collect_long:
  1.2002 -  case _adapter_opt_collect_float:
  1.2003 -  case _adapter_opt_collect_double:
  1.2004 -  case _adapter_opt_collect_void:
  1.2005 -  case _adapter_opt_collect_0_ref:
  1.2006 -  case _adapter_opt_collect_1_ref:
  1.2007 -  case _adapter_opt_collect_2_ref:
  1.2008 -  case _adapter_opt_collect_3_ref:
  1.2009 -  case _adapter_opt_collect_4_ref:
  1.2010 -  case _adapter_opt_collect_5_ref:
  1.2011 -  case _adapter_opt_filter_S0_ref:
  1.2012 -  case _adapter_opt_filter_S1_ref:
  1.2013 -  case _adapter_opt_filter_S2_ref:
  1.2014 -  case _adapter_opt_filter_S3_ref:
  1.2015 -  case _adapter_opt_filter_S4_ref:
  1.2016 -  case _adapter_opt_filter_S5_ref:
  1.2017 -  case _adapter_opt_collect_2_S0_ref:
  1.2018 -  case _adapter_opt_collect_2_S1_ref:
  1.2019 -  case _adapter_opt_collect_2_S2_ref:
  1.2020 -  case _adapter_opt_collect_2_S3_ref:
  1.2021 -  case _adapter_opt_collect_2_S4_ref:
  1.2022 -  case _adapter_opt_collect_2_S5_ref:
  1.2023 -  case _adapter_opt_fold_ref:
  1.2024 -  case _adapter_opt_fold_int:
  1.2025 -  case _adapter_opt_fold_long:
  1.2026 -  case _adapter_opt_fold_float:
  1.2027 -  case _adapter_opt_fold_double:
  1.2028 -  case _adapter_opt_fold_void:
  1.2029 -  case _adapter_opt_fold_1_ref:
  1.2030 -  case _adapter_opt_fold_2_ref:
  1.2031 -  case _adapter_opt_fold_3_ref:
  1.2032 -  case _adapter_opt_fold_4_ref:
  1.2033 -  case _adapter_opt_fold_5_ref:
  1.2034 -    {
  1.2035 -      // Given a fresh incoming stack frame, build a new ricochet frame.
   1.2036 -      // On entry, O5_savedSP holds the caller's exact stack pointer, which we
   1.2037 -      // must also preserve, and G3_method_handle contains an AdapterMethodHandle
   1.2038 -      // of the indicated kind.
  1.2039 -
  1.2040 -      // Relevant AMH fields:
  1.2041 -      // amh.vmargslot:
  1.2042 -      //   points to the trailing edge of the arguments
  1.2043 -      //   to filter, collect, or fold.  For a boxing operation,
  1.2044 -      //   it points just after the single primitive value.
  1.2045 -      // amh.argument:
  1.2046 -      //   recursively called MH, on |collect| arguments
  1.2047 -      // amh.vmtarget:
  1.2048 -      //   final destination MH, on return value, etc.
  1.2049 -      // amh.conversion.dest:
  1.2050 -      //   tells what is the type of the return value
  1.2051 -      //   (not needed here, since dest is also derived from ek)
  1.2052 -      // amh.conversion.vminfo:
  1.2053 -      //   points to the trailing edge of the return value
  1.2054 -      //   when the vmtarget is to be called; this is
  1.2055 -      //   equal to vmargslot + (retained ? |collect| : 0)
  1.2056 -
  1.2057 -      // Pass 0 or more argument slots to the recursive target.
  1.2058 -      int collect_count_constant = ek_adapter_opt_collect_count(ek);
  1.2059 -
  1.2060 -      // The collected arguments are copied from the saved argument list:
  1.2061 -      int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
  1.2062 -
  1.2063 -      assert(ek_orig == _adapter_collect_args ||
  1.2064 -             ek_orig == _adapter_fold_args, "");
  1.2065 -      bool retain_original_args = (ek_orig == _adapter_fold_args);
  1.2066 -
  1.2067 -      // The return value is replaced (or inserted) at the 'vminfo' argslot.
  1.2068 -      // Sometimes we can compute this statically.
  1.2069 -      int dest_slot_constant = -1;
  1.2070 -      if (!retain_original_args)
  1.2071 -        dest_slot_constant = collect_slot_constant;
  1.2072 -      else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
  1.2073 -        // We are preserving all the arguments, and the return value is prepended,
   1.2074 -        // so the return slot is to the left of (above) the |collect| sequence.
  1.2075 -        dest_slot_constant = collect_slot_constant + collect_count_constant;
  1.2076 -
  1.2077 -      // Replace all those slots by the result of the recursive call.
  1.2078 -      // The result type can be one of ref, int, long, float, double, void.
  1.2079 -      // In the case of void, nothing is pushed on the stack after return.
  1.2080 -      BasicType dest = ek_adapter_opt_collect_type(ek);
  1.2081 -      assert(dest == type2wfield[dest], "dest is a stack slot type");
  1.2082 -      int dest_count = type2size[dest];
  1.2083 -      assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
  1.2084 -
  1.2085 -      // Choose a return continuation.
  1.2086 -      EntryKind ek_ret = _adapter_opt_return_any;
  1.2087 -      if (dest != T_CONFLICT && OptimizeMethodHandles) {
  1.2088 -        switch (dest) {
  1.2089 -        case T_INT    : ek_ret = _adapter_opt_return_int;     break;
  1.2090 -        case T_LONG   : ek_ret = _adapter_opt_return_long;    break;
  1.2091 -        case T_FLOAT  : ek_ret = _adapter_opt_return_float;   break;
  1.2092 -        case T_DOUBLE : ek_ret = _adapter_opt_return_double;  break;
  1.2093 -        case T_OBJECT : ek_ret = _adapter_opt_return_ref;     break;
  1.2094 -        case T_VOID   : ek_ret = _adapter_opt_return_void;    break;
  1.2095 -        default       : ShouldNotReachHere();
  1.2096 -        }
  1.2097 -        if (dest == T_OBJECT && dest_slot_constant >= 0) {
  1.2098 -          EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
  1.2099 -          if (ek_try <= _adapter_opt_return_LAST &&
  1.2100 -              ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
  1.2101 -            ek_ret = ek_try;
  1.2102 -          }
  1.2103 -        }
  1.2104 -        assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
  1.2105 -      }
  1.2106 -
  1.2107 -      // Already pushed:  ... keep1 | collect | keep2 |
  1.2108 -
  1.2109 -      // Push a few extra argument words, if we need them to store the return value.
  1.2110 -      {
  1.2111 -        int extra_slots = 0;
  1.2112 -        if (retain_original_args) {
  1.2113 -          extra_slots = dest_count;
  1.2114 -        } else if (collect_count_constant == -1) {
  1.2115 -          extra_slots = dest_count;  // collect_count might be zero; be generous
  1.2116 -        } else if (dest_count > collect_count_constant) {
  1.2117 -          extra_slots = (dest_count - collect_count_constant);
  1.2118 -        } else {
  1.2119 -          // else we know we have enough dead space in |collect| to repurpose for return values
  1.2120 -        }
  1.2121 -        if (extra_slots != 0) {
  1.2122 -          __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP);
  1.2123 -        }
  1.2124 -      }
  1.2125 -
  1.2126 -      // Set up Ricochet Frame.
  1.2127 -      __ mov(SP, O5_savedSP);  // record SP for the callee
  1.2128 -
  1.2129 -      // One extra (empty) slot for outgoing target MH (see Gargs computation below).
  1.2130 -      __ save_frame(2);  // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23.
  1.2131 -
  1.2132 -      // Note: Gargs is live throughout the following, until we make our recursive call.
  1.2133 -      // And the RF saves a copy in L4_saved_args_base.
  1.2134 -
  1.2135 -      RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs,
  1.2136 -                                          entry(ek_ret)->from_interpreted_entry());
  1.2137 -
  1.2138 -      // Compute argument base:
  1.2139 -      // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above).
  1.2140 -      __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs);
  1.2141 -
  1.2142 -      // Now pushed:  ... keep1 | collect | keep2 | extra | [RF]
  1.2143 -
  1.2144 -#ifdef ASSERT
  1.2145 -      if (VerifyMethodHandles && dest != T_CONFLICT) {
  1.2146 -        BLOCK_COMMENT("verify AMH.conv.dest {");
  1.2147 -        extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch);
  1.2148 -        Label L_dest_ok;
  1.2149 -        __ cmp(O1_scratch, (int) dest);
  1.2150 -        __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
  1.2151 -        __ delayed()->nop();
  1.2152 -        if (dest == T_INT) {
  1.2153 -          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  1.2154 -            if (is_subword_type(BasicType(bt))) {
  1.2155 -              __ cmp(O1_scratch, (int) bt);
  1.2156 -              __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
  1.2157 -              __ delayed()->nop();
  1.2158 -            }
  1.2159 -          }
  1.2160 -        }
  1.2161 -        __ stop("bad dest in AMH.conv");
  1.2162 -        __ BIND(L_dest_ok);
  1.2163 -        BLOCK_COMMENT("} verify AMH.conv.dest");
  1.2164 -      }
  1.2165 -#endif //ASSERT
  1.2166 -
  1.2167 -      // Find out where the original copy of the recursive argument sequence begins.
  1.2168 -      Register O0_coll = O0_scratch;
  1.2169 -      {
  1.2170 -        RegisterOrConstant collect_slot = collect_slot_constant;
  1.2171 -        if (collect_slot_constant == -1) {
  1.2172 -          load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch);
  1.2173 -          collect_slot = O1_scratch;
  1.2174 -        }
  1.2175 -        // collect_slot might be 0, but we need the move anyway.
  1.2176 -        __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll);
  1.2177 -        // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2|
  1.2178 -      }
  1.2179 -
  1.2180 -      // Replace the old AMH with the recursive MH.  (No going back now.)
  1.2181 -      // In the case of a boxing call, the recursive call is to a 'boxer' method,
  1.2182 -      // such as Integer.valueOf or Long.valueOf.  In the case of a filter
  1.2183 -      // or collect call, it will take one or more arguments, transform them,
  1.2184 -      // and return some result, to store back into argument_base[vminfo].
  1.2185 -      __ load_heap_oop(G3_amh_argument, G3_method_handle);
  1.2186 -      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch);
  1.2187 -
  1.2188 -      // Calculate |collect|, the number of arguments we are collecting.
  1.2189 -      Register O1_collect_count = O1_scratch;
  1.2190 -      RegisterOrConstant collect_count;
  1.2191 -      if (collect_count_constant < 0) {
  1.2192 -        __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch);
  1.2193 -        collect_count = O1_collect_count;
  1.2194 -      } else {
  1.2195 -        collect_count = collect_count_constant;
  1.2196 -#ifdef ASSERT
  1.2197 -        if (VerifyMethodHandles) {
  1.2198 -          BLOCK_COMMENT("verify collect_count_constant {");
  1.2199 -          __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
  1.2200 -          Label L_count_ok;
  1.2201 -          __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok);
  1.2202 -          __ stop("bad vminfo in AMH.conv");
  1.2203 -          __ BIND(L_count_ok);
  1.2204 -          BLOCK_COMMENT("} verify collect_count_constant");
  1.2205 -        }
  1.2206 -#endif //ASSERT
  1.2207 -      }
  1.2208 -
  1.2209 -      // copy |collect| slots directly to TOS:
  1.2210 -      push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch);
  1.2211 -      // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
  1.2212 -      // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2|
  1.2213 -
  1.2214 -      // If necessary, adjust the saved arguments to make room for the eventual return value.
  1.2215 -      // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
  1.2216 -      // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
  1.2217 -      // In the non-retaining case, this might move keep2 either up or down.
  1.2218 -      // We don't have to copy the whole | RF... collect | complex,
  1.2219 -      // but we must adjust RF.saved_args_base.
  1.2220 -      // Also, from now on, we will forget about the original copy of |collect|.
  1.2221 -      // If we are retaining it, we will treat it as part of |keep2|.
  1.2222 -      // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
  1.2223 -
  1.2224 -      BLOCK_COMMENT("adjust trailing arguments {");
  1.2225 -      // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
  1.2226 -      int                open_count  = dest_count;
  1.2227 -      RegisterOrConstant close_count = collect_count_constant;
  1.2228 -      Register O1_close_count = O1_collect_count;
  1.2229 -      if (retain_original_args) {
  1.2230 -        close_count = constant(0);
  1.2231 -      } else if (collect_count_constant == -1) {
  1.2232 -        close_count = O1_collect_count;
  1.2233 -      }
  1.2234 -
  1.2235 -      // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
  1.2236 -      RegisterOrConstant keep3_count;
  1.2237 -      Register O2_keep3_count = O2_scratch;
  1.2238 -      if (dest_slot_constant < 0) {
  1.2239 -        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count);
  1.2240 -        keep3_count = O2_keep3_count;
  1.2241 -      } else  {
  1.2242 -        keep3_count = dest_slot_constant;
  1.2243 -#ifdef ASSERT
  1.2245 -        if (VerifyMethodHandles) {
  1.2245 -          BLOCK_COMMENT("verify dest_slot_constant {");
  1.2246 -          extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
  1.2247 -          Label L_vminfo_ok;
  1.2248 -          __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok);
  1.2249 -          __ stop("bad vminfo in AMH.conv");
  1.2250 -          __ BIND(L_vminfo_ok);
  1.2251 -          BLOCK_COMMENT("} verify dest_slot_constant");
  1.2252 -        }
  1.2253 -#endif //ASSERT
  1.2254 -      }
  1.2255 -
  1.2256 -      // tasks remaining:
  1.2257 -      bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
  1.2258 -      bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
  1.2259 -      bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
  1.2260 -
  1.2261 -      // Old and new argument locations (based at slot 0).
  1.2262 -      // Net shift (&new_argv - &old_argv) is (close_count - open_count).
  1.2263 -      bool zero_open_count = (open_count == 0);  // remember this bit of info
  1.2264 -      if (move_keep3 && fix_arg_base) {
  1.2265 -        // It will be easier to have everything in one register:
  1.2266 -        if (close_count.is_register()) {
  1.2267 -          // Deduct open_count from close_count register to get a clean +/- value.
  1.2268 -          __ sub(close_count.as_register(), open_count, close_count.as_register());
  1.2269 -        } else {
  1.2270 -          close_count = close_count.as_constant() - open_count;
  1.2271 -        }
  1.2272 -        open_count = 0;
  1.2273 -      }
  1.2274 -      Register L4_old_argv = RicochetFrame::L4_saved_args_base;
  1.2275 -      Register O3_new_argv = O3_scratch;
  1.2276 -      if (fix_arg_base) {
  1.2277 -        __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv,
  1.2278 -               -(open_count * Interpreter::stackElementSize));
  1.2279 -      }
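// Illustrative restatement (not part of this changeset) of the net-shift
// computation above: once open_count has been folded into close_count, the new
// argument base differs from the old one by the change in argument-list size.
// The helper and its byte-based arithmetic are assumptions.
static intptr_t* shifted_arg_base(intptr_t* old_argv,
                                  int close_count, int open_count,
                                  int stack_element_size) {
  // Net shift (&new_argv - &old_argv) is (close_count - open_count) slots.
  return (intptr_t*) ((char*) old_argv +
                      (ptrdiff_t) (close_count - open_count) * stack_element_size);
}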
  1.2280 -
  1.2281 -      // First decide if any actual data are to be moved.
  1.2282 -      // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
  1.2283 -      // (As it happens, all movements involve an argument list size change.)
  1.2284 -
  1.2285 -      // If there are variable parameters, use dynamic checks to skip around the whole mess.
  1.2286 -      Label L_done;
  1.2287 -      if (keep3_count.is_register()) {
  1.2288 -        __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done);
  1.2289 -      }
  1.2290 -      if (close_count.is_register()) {
  1.2291 -        __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done);
  1.2292 -      }
  1.2293 -
  1.2294 -      if (move_keep3 && fix_arg_base) {
  1.2295 -        bool emit_move_down = false, emit_move_up = false, emit_guard = false;
  1.2296 -        if (!close_count.is_constant()) {
  1.2297 -          emit_move_down = emit_guard = !zero_open_count;
  1.2298 -          emit_move_up   = true;
  1.2299 -        } else if (open_count != close_count.as_constant()) {
  1.2300 -          emit_move_down = (open_count > close_count.as_constant());
  1.2301 -          emit_move_up   = !emit_move_down;
  1.2302 -        }
  1.2303 -        Label L_move_up;
  1.2304 -        if (emit_guard) {
  1.2305 -          __ cmp(close_count.as_register(), open_count);
  1.2306 -          __ br(Assembler::greater, false, Assembler::pn, L_move_up);
  1.2307 -          __ delayed()->nop();
  1.2308 -        }
  1.2309 -
  1.2310 -        if (emit_move_down) {
  1.2311 -          // Move arguments down if |+dest+| > |-collect-|
  1.2312 -          // (This is rare, except when arguments are retained.)
  1.2313 -          // This opens space for the return value.
  1.2314 -          if (keep3_count.is_constant()) {
  1.2315 -            for (int i = 0; i < keep3_count.as_constant(); i++) {
  1.2316 -              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
  1.2317 -              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
  1.2318 -            }
  1.2319 -          } else {
  1.2320 -            // Live: O1_close_count, O2_keep3_count, O3_new_argv
  1.2321 -            Register argv_top = O0_scratch;
  1.2322 -            __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top);
  1.2323 -            move_arg_slots_down(_masm,
  1.2324 -                                Address(L4_old_argv, 0),  // beginning of old argv
  1.2325 -                                argv_top,                 // end of old argv
  1.2326 -                                close_count,              // distance to move down (must be negative)
  1.2327 -                                O4_scratch, G5_scratch);
  1.2328 -          }
  1.2329 -        }
  1.2330 -
  1.2331 -        if (emit_guard) {
  1.2332 -          __ ba_short(L_done);  // assumes emit_move_up is true also
  1.2333 -          __ BIND(L_move_up);
  1.2334 -        }
  1.2335 -
  1.2336 -        if (emit_move_up) {
  1.2337 -          // Move arguments up if |+dest+| < |-collect-|
  1.2338 -          // (This is usual, except when |keep3| is empty.)
  1.2339 -          // This closes up the space occupied by the now-deleted collect values.
  1.2340 -          if (keep3_count.is_constant()) {
  1.2341 -            for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
  1.2342 -              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
  1.2343 -              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
  1.2344 -            }
  1.2345 -          } else {
  1.2346 -            Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch));
  1.2347 -            // Live: O1_close_count, O2_keep3_count, O3_new_argv
  1.2348 -            move_arg_slots_up(_masm,
  1.2349 -                              L4_old_argv,  // beginning of old argv
  1.2350 -                              argv_top,     // end of old argv
  1.2351 -                              close_count,  // distance to move up (must be positive)
  1.2352 -                              O4_scratch, G5_scratch);
  1.2353 -          }
  1.2354 -        }
  1.2355 -      }
  1.2356 -      __ BIND(L_done);
  1.2357 -
  1.2358 -      if (fix_arg_base) {
  1.2359 -        // adjust RF.saved_args_base
  1.2360 -        __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base);
  1.2361 -      }
  1.2362 -
  1.2363 -      if (stomp_dest) {
  1.2364 -        // Stomp the return slot, so it doesn't hold garbage.
  1.2365 -        // This isn't strictly necessary, but it may help detect bugs.
  1.2366 -        __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch);
  1.2367 -        __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base,
  1.2368 -                                      __ argument_offset(keep3_count, keep3_count.register_or_noreg())));  // uses O2_keep3_count
  1.2369 -      }
  1.2370 -      BLOCK_COMMENT("} adjust trailing arguments");
  1.2371 -
  1.2372 -      BLOCK_COMMENT("do_recursive_call");
  1.2373 -      __ mov(SP, O5_savedSP);  // record SP for the callee
  1.2374 -      __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7);
  1.2375 -      // The globally unique bounce address has two purposes:
  1.2376 -      // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
  1.2377 -      // 2. When returned to, it cuts back the stack and redirects control flow
  1.2378 -      //    to the return handler.
  1.2379 -      // The return handler will further cut back the stack when it takes
  1.2380 -      // down the RF.  Perhaps there is a way to streamline this further.
  1.2381 -
  1.2382 -      // State during recursive call:
  1.2383 -      // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
  1.2384 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.2385 -    }
  1.2386 -    break;
  1.2387 -
  1.2388 -  case _adapter_opt_return_ref:
  1.2389 -  case _adapter_opt_return_int:
  1.2390 -  case _adapter_opt_return_long:
  1.2391 -  case _adapter_opt_return_float:
  1.2392 -  case _adapter_opt_return_double:
  1.2393 -  case _adapter_opt_return_void:
  1.2394 -  case _adapter_opt_return_S0_ref:
  1.2395 -  case _adapter_opt_return_S1_ref:
  1.2396 -  case _adapter_opt_return_S2_ref:
  1.2397 -  case _adapter_opt_return_S3_ref:
  1.2398 -  case _adapter_opt_return_S4_ref:
  1.2399 -  case _adapter_opt_return_S5_ref:
  1.2400 -    {
  1.2401 -      BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
  1.2402 -      int       dest_slot_constant = ek_adapter_opt_return_slot(ek);
  1.2403 -
  1.2404 -      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
  1.2405 -
  1.2406 -      if (dest_slot_constant == -1) {
  1.2407 -        // The current stub is a general handler for this dest_type.
  1.2408 -        // It can be called from _adapter_opt_return_any below.
  1.2409 -        // Stash the address in a little table.
  1.2410 -        assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
  1.2411 -        address return_handler = __ pc();
  1.2412 -        _adapter_return_handlers[dest_type_constant] = return_handler;
  1.2413 -        if (dest_type_constant == T_INT) {
  1.2414 -          // do the subword types too
  1.2415 -          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  1.2416 -            if (is_subword_type(BasicType(bt)) &&
  1.2417 -                _adapter_return_handlers[bt] == NULL) {
  1.2418 -              _adapter_return_handlers[bt] = return_handler;
  1.2419 -            }
  1.2420 -          }
  1.2421 -        }
  1.2422 -      }
  1.2423 -
  1.2424 -      // On entry to this continuation handler, make Gargs live again.
  1.2425 -      __ mov(RicochetFrame::L4_saved_args_base, Gargs);
  1.2426 -
  1.2427 -      Register O7_temp   = O7;
  1.2428 -      Register O5_vminfo = O5;
  1.2429 -
  1.2430 -      RegisterOrConstant dest_slot = dest_slot_constant;
  1.2431 -      if (dest_slot_constant == -1) {
  1.2432 -        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
  1.2433 -        dest_slot = O5_vminfo;
  1.2434 -      }
  1.2435 -      // Store the result back into the argslot.
  1.2436 -      // This code uses the interpreter calling sequence, in which the return value
  1.2437 -      // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
  1.2438 -      // There are certain irregularities with floating point values, which can be seen
  1.2439 -      // in TemplateInterpreterGenerator::generate_return_entry_for.
  1.2440 -      move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));
  1.2441 -
  1.2442 -      RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);
  1.2443 -
  1.2444 -      // Load the final target and go.
  1.2445 -      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
  1.2446 -      __ restore(I5_savedSP, G0, SP);
  1.2447 -      __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
  1.2448 -      __ illtrap(0);
  1.2449 -    }
  1.2450 -    break;
  1.2451 -
  1.2452 -  case _adapter_opt_return_any:
  1.2453 -    {
  1.2454 -      Register O7_temp      = O7;
  1.2455 -      Register O5_dest_type = O5;
  1.2456 -
  1.2457 -      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
  1.2458 -      extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
  1.2459 -      __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
  1.2460 -      __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
  1.2461 -      __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
  1.2462 -
  1.2463 -#ifdef ASSERT
  1.2464 -      { Label L_ok;
  1.2465 -        __ br_notnull_short(O7_temp, Assembler::pt, L_ok);
  1.2466 -        __ stop("bad method handle return");
  1.2467 -        __ BIND(L_ok);
  1.2468 -      }
  1.2469 -#endif //ASSERT
  1.2470 -      __ JMP(O7_temp, 0);
  1.2471 -      __ delayed()->nop();
  1.2472 -    }
  1.2473 -    break;
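// Sketch (not part of this changeset) of what the assembly above does: the
// generic return path indexes a word-sized table of continuation stubs by the
// conversion's destination BasicType and jumps through it.  The table name
// matches the code above; the helper function is hypothetical.
static address pick_return_handler(BasicType dest_type) {
  address handler = _adapter_return_handlers[dest_type];  // filled in by the typed return stubs
  assert(handler != NULL, "bad method handle return");    // same check as the ASSERT block above
  return handler;  // note: subword types (boolean..short) share the T_INT entry
}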
  1.2474 -
  1.2475 -  case _adapter_opt_spread_0:
  1.2476 -  case _adapter_opt_spread_1_ref:
  1.2477 -  case _adapter_opt_spread_2_ref:
  1.2478 -  case _adapter_opt_spread_3_ref:
  1.2479 -  case _adapter_opt_spread_4_ref:
  1.2480 -  case _adapter_opt_spread_5_ref:
  1.2481 -  case _adapter_opt_spread_ref:
  1.2482 -  case _adapter_opt_spread_byte:
  1.2483 -  case _adapter_opt_spread_char:
  1.2484 -  case _adapter_opt_spread_short:
  1.2485 -  case _adapter_opt_spread_int:
  1.2486 -  case _adapter_opt_spread_long:
  1.2487 -  case _adapter_opt_spread_float:
  1.2488 -  case _adapter_opt_spread_double:
  1.2489 -    {
  1.2490 -      // spread an array out into a group of arguments
  1.2491 -      int  length_constant    = ek_adapter_opt_spread_count(ek);
  1.2492 -      bool length_can_be_zero = (length_constant == 0);
  1.2493 -      if (length_constant < 0) {
  1.2494 -        // some adapters with variable length must handle the zero case
  1.2495 -        if (!OptimizeMethodHandles ||
  1.2496 -            ek_adapter_opt_spread_type(ek) != T_OBJECT)
  1.2497 -          length_can_be_zero = true;
  1.2498 -      }
  1.2499 -
  1.2500 -      // find the address of the array argument
  1.2501 -      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
  1.2502 -      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
  1.2503 -
  1.2504 -      // O0_argslot points both to the array and to the first output arg
  1.2505 -      Address vmarg = Address(O0_argslot, 0);
  1.2506 -
  1.2507 -      // Get the array value.
  1.2508 -      Register  O1_array       = O1_scratch;
  1.2509 -      Register  O2_array_klass = O2_scratch;
  1.2510 -      BasicType elem_type      = ek_adapter_opt_spread_type(ek);
  1.2511 -      int       elem_slots     = type2size[elem_type];  // 1 or 2
  1.2512 -      int       array_slots    = 1;  // array is always a T_OBJECT
  1.2513 -      int       length_offset  = arrayOopDesc::length_offset_in_bytes();
  1.2514 -      int       elem0_offset   = arrayOopDesc::base_offset_in_bytes(elem_type);
  1.2515 -      __ ld_ptr(vmarg, O1_array);
  1.2516 -
  1.2517 -      Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
  1.2518 -      if (length_can_be_zero) {
  1.2519 -        // handle the null pointer case, if zero is allowed
  1.2520 -        Label L_skip;
  1.2521 -        if (length_constant < 0) {
  1.2522 -          load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
  1.2523 -          __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip);
  1.2524 -          __ delayed()->nop(); // to avoid back-to-back cbcond instructions
  1.2525 -        }
  1.2526 -        __ br_null_short(O1_array, Assembler::pn, L_array_is_empty);
  1.2527 -        __ BIND(L_skip);
  1.2528 -      }
  1.2529 -      __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
  1.2530 -      __ load_klass(O1_array, O2_array_klass);
  1.2531 -
  1.2532 -      // Check the array type.
  1.2533 -      Register O3_klass = O3_scratch;
  1.2534 -      __ load_heap_oop(G3_amh_argument, O3_klass);  // this is a Class object!
  1.2535 -      load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);
  1.2536 -
  1.2537 -      Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
  1.2538 -      __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
  1.2539 -      // If we get here, the type check failed!
  1.2540 -      __ ba_short(L_bad_array_klass);
  1.2541 -      __ BIND(L_ok_array_klass);
  1.2542 -
  1.2543 -      // Check length.
  1.2544 -      if (length_constant >= 0) {
  1.2545 -        __ ldsw(Address(O1_array, length_offset), O4_scratch);
  1.2546 -        __ cmp(O4_scratch, length_constant);
  1.2547 -      } else {
  1.2548 -        Register O3_vminfo = O3_scratch;
  1.2549 -        load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
  1.2550 -        __ ldsw(Address(O1_array, length_offset), O4_scratch);
  1.2551 -        __ cmp(O3_vminfo, O4_scratch);
  1.2552 -      }
  1.2553 -      __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
  1.2554 -      __ delayed()->nop();
  1.2555 -
  1.2556 -      Register O2_argslot_limit = O2_scratch;
  1.2557 -
  1.2558 -      // Array length checks out.  Now insert any required stack slots.
  1.2559 -      if (length_constant == -1) {
  1.2560 -        // Form a pointer to the end of the affected region.
  1.2561 -        __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
  1.2562 -        // 'stack_move' is negative number of words to insert
  1.2563 -        // This number already accounts for elem_slots.
  1.2564 -        Register O3_stack_move = O3_scratch;
  1.2565 -        load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
  1.2566 -        __ cmp(O3_stack_move, 0);
  1.2567 -        assert(stack_move_unit() < 0, "else change this comparison");
  1.2568 -        __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
  1.2569 -        __ delayed()->nop();
  1.2570 -        __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
  1.2571 -        __ delayed()->nop();
  1.2572 -        // single argument case, with no array movement
  1.2573 -        __ BIND(L_array_is_empty);
  1.2574 -        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
  1.2575 -                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.2576 -        __ ba_short(L_args_done);  // no spreading to do
  1.2577 -        __ BIND(L_insert_arg_space);
  1.2578 -        // come here in the usual case, stack_move < 0 (2 or more spread arguments)
  1.2579 -        // Live: O1_array, O2_argslot_limit, O3_stack_move
  1.2580 -        insert_arg_slots(_masm, O3_stack_move,
  1.2581 -                         O0_argslot, O4_scratch, G5_scratch, O1_scratch);
  1.2582 -        // reload the array from O2_argslot_limit[-stackElementSize], since insert_arg_slots has just shifted the slots at O0_argslot
  1.2583 -        __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
  1.2584 -      } else if (length_constant >= 1) {
  1.2585 -        int new_slots = (length_constant * elem_slots) - array_slots;
  1.2586 -        insert_arg_slots(_masm, new_slots * stack_move_unit(),
  1.2587 -                         O0_argslot, O2_scratch, O3_scratch, O4_scratch);
  1.2588 -      } else if (length_constant == 0) {
  1.2589 -        __ BIND(L_array_is_empty);
  1.2590 -        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
  1.2591 -                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
  1.2592 -      } else {
  1.2593 -        ShouldNotReachHere();
  1.2594 -      }
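// Constant-length slot bookkeeping above, restated for reference (not part of
// this changeset): spreading an array of `length` elements replaces the single
// array reference (one T_OBJECT slot) with length * elem_slots argument slots.
// The helper name is hypothetical.
static int spread_slot_delta(int length, int elem_slots /* type2size[elem_type], 1 or 2 */) {
  const int array_slots = 1;                 // the array argument itself occupies one slot
  return length * elem_slots - array_slots;  // > 0: insert slots; -1 (length == 0): remove the array slot
}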
  1.2595 -
  1.2596 -      // Copy from the array to the new slots.
  1.2597 -      // Note: Stack change code preserves integrity of O0_argslot pointer.
  1.2598 -      // So even after slot insertions, O0_argslot still points to first argument.
  1.2599 -      // Beware:  Arguments that are shallow on the stack are deep in the array,
  1.2600 -      // and vice versa.  So a downward-growing stack (the usual) has to be copied
  1.2601 -      // elementwise in reverse order from the source array.
  1.2602 -      __ BIND(L_copy_args);
  1.2603 -      if (length_constant == -1) {
  1.2604 -        // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
  1.2605 -        // Array element [0] goes at O2_argslot_limit[-wordSize].
  1.2606 -        Register O1_source = O1_array;
  1.2607 -        __ add(Address(O1_array, elem0_offset), O1_source);
  1.2608 -        Register O4_fill_ptr = O4_scratch;
  1.2609 -        __ mov(O2_argslot_limit, O4_fill_ptr);
  1.2610 -        Label L_loop;
  1.2611 -        __ BIND(L_loop);
  1.2612 -        __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
  1.2613 -        move_typed_arg(_masm, elem_type, true,
  1.2614 -                       Address(O1_source, 0), Address(O4_fill_ptr, 0),
  1.2615 -                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
  1.2616 -        __ add(O1_source, type2aelembytes(elem_type), O1_source);
  1.2617 -        __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop);
  1.2618 -      } else if (length_constant == 0) {
  1.2619 -        // nothing to copy
  1.2620 -      } else {
  1.2621 -        int elem_offset = elem0_offset;
  1.2622 -        int slot_offset = length_constant * Interpreter::stackElementSize;
  1.2623 -        for (int index = 0; index < length_constant; index++) {
  1.2624 -          slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
  1.2625 -          move_typed_arg(_masm, elem_type, true,
  1.2626 -                         Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
  1.2627 -                         O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
  1.2628 -          elem_offset += type2aelembytes(elem_type);
  1.2629 -        }
  1.2630 -      }
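// Sketch (not part of this changeset) of the copy order used above for the
// constant-length case: the interpreter stack grows downward, so elements that
// are shallow on the stack are deep in the array, and the slots are filled
// backward while the array is read forward.  Word-sized elements only; the
// real code uses move_typed_arg, which also handles two-slot longs/doubles.
static void spread_copy_words(intptr_t* argslot, const intptr_t* array_elems, int length) {
  for (int index = 0; index < length; index++) {
    argslot[(length - 1) - index] = array_elems[index];   // element [0] lands in the deepest slot
  }
}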
  1.2631 -      __ BIND(L_args_done);
  1.2632 -
  1.2633 -      // Arguments are spread.  Move to next method handle.
  1.2634 -      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
  1.2635 -      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
  1.2636 -
  1.2637 -      __ BIND(L_bad_array_klass);
  1.2638 -      assert(!vmarg.uses(O2_required), "must be different registers");
  1.2639 -      __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required);  // required class
  1.2640 -      __ ld_ptr(       vmarg,                                       O1_actual);    // bad object
  1.2641 -      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
  1.2642 -      __ delayed()->mov(Bytecodes::_aaload,                         O0_code);      // who is complaining?
  1.2643 -
  1.2644 -      __ bind(L_bad_array_length);
  1.2645 -      assert(!vmarg.uses(O2_required), "must be different registers");
  1.2646 -      __ mov(   G3_method_handle,                O2_required);  // required class
  1.2647 -      __ ld_ptr(vmarg,                           O1_actual);    // bad object
  1.2648 -      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
  1.2649 -      __ delayed()->mov(Bytecodes::_arraylength, O0_code);      // who is complaining?
  1.2650 -    }
  1.2651 -    break;
  1.2652 -
  1.2653 -  default:
  1.2654 -    DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
  1.2655 -    ShouldNotReachHere();
  1.2656 -  }
  1.2657 -  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
  1.2658 -
  1.2659 -  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  1.2660 -  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
  1.2661 -
  1.2662 -  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
  1.2663 -}
