src/cpu/x86/vm/methodHandles_x86.cpp

changeset:   2895:167b70ff3abc
parent:      2868:2e038ad0c1d0
child:       2903:fabcf26ee72f
author:      never
date:        Fri, 06 May 2011 16:33:13 -0700
permissions: -rw-r--r--

6939861: JVM should handle more conversion operations
Reviewed-by: twisti, jrose

     1 /*
     2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "interpreter/interpreter.hpp"
    27 #include "memory/allocation.inline.hpp"
    28 #include "prims/methodHandles.hpp"
    30 #define __ _masm->
    32 #ifdef PRODUCT
    33 #define BLOCK_COMMENT(str) /* nothing */
    34 #else
    35 #define BLOCK_COMMENT(str) __ block_comment(str)
    36 #endif
    38 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    40 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
    41                                                 address interpreted_entry) {
    42   // Just before the actual machine code entry point, allocate space
    43   // for a MethodHandleEntry::Data record, so that we can manage everything
    44   // from one base pointer.
    45   __ align(wordSize);
    46   address target = __ pc() + sizeof(Data);
    47   while (__ pc() < target) {
    48     __ nop();
    49     __ align(wordSize);
    50   }
    52   MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
    53   me->set_end_address(__ pc());         // set a temporary end_address
    54   me->set_from_interpreted_entry(interpreted_entry);
    55   me->set_type_checking_entry(NULL);
    57   return (address) me;
    58 }
    60 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
    61                                                 address start_addr) {
    62   MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
    63   assert(me->end_address() == start_addr, "valid ME");
    65   // Fill in the real end_address:
    66   __ align(wordSize);
    67   me->set_end_address(__ pc());
    69   return me;
    70 }
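       // Illustrative sketch (not taken from this file): a stub generator would
       // normally bracket its emitted code with the two helpers above, e.g.
       //   address start = MethodHandleEntry::start_compiled_entry(_masm, interpreted_entry);
       //   ... emit the handler body with _masm ...
       //   MethodHandleEntry* me = MethodHandleEntry::finish_compiled_entry(_masm, start);
       // so the Data record sits just below the machine-code entry point and
       // end_address() ends up bracketing the generated code.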
    72 // stack walking support
    74 frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
    75   RicochetFrame* f = RicochetFrame::from_frame(fr);
    76   if (map->update_map())
    77     frame::update_map_with_saved_link(map, &f->_sender_link);
    78   return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc());
    79 }
    81 void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
    82   RicochetFrame* f = RicochetFrame::from_frame(fr);
    84   // pick up the argument type descriptor:
    85   Thread* thread = Thread::current();
    86   Handle cookie(thread, f->compute_saved_args_layout(true, true));
    88   // process fixed part
    89   blk->do_oop((oop*)f->saved_target_addr());
    90   blk->do_oop((oop*)f->saved_args_layout_addr());
    92   // process variable arguments:
    93   if (cookie.is_null())  return;  // no arguments to describe
    95   // The cookie is actually the invokeExact method for my target;
    96   // its argument signature is what I'm interested in.
    97   assert(cookie->is_method(), "");
    98   methodHandle invoker(thread, methodOop(cookie()));
    99   assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
   100   assert(!invoker->is_static(), "must have MH argument");
   101   int slot_count = invoker->size_of_parameters();
   102   assert(slot_count >= 1, "must include 'this'");
   103   intptr_t* base = f->saved_args_base();
   104   intptr_t* retval = NULL;
   105   if (f->has_return_value_slot())
   106     retval = f->return_value_slot_addr();
   107   int slot_num = slot_count;
   108   intptr_t* loc = &base[slot_num -= 1];
   109   //blk->do_oop((oop*) loc);   // original target, which is irrelevant
   110   int arg_num = 0;
   111   for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
   112     if (ss.at_return_type())  continue;
   113     BasicType ptype = ss.type();
   114     if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
   115     assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
   116     loc = &base[slot_num -= type2size[ptype]];
   117     bool is_oop = (ptype == T_OBJECT && loc != retval);
   118     if (is_oop)  blk->do_oop((oop*)loc);
   119     arg_num += 1;
   120   }
   121   assert(slot_num == 0, "must have processed all the arguments");
   122 }
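       // Worked example (illustrative only): for an invokeExact invoker whose type is
       // (int,long)double, size_of_parameters() = 1 ('this') + 1 + 2 = 4 slots.
       // The loop starts at slot_num = 4, consumes slot 3 for the (ignored) target
       // handle, places the int at slot 2 and the long at slots 1..0, and reports an
       // oop location only for T_OBJECT slots that are not the return-value slot.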
   124 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
   125   oop cookie = NULL;
   126   if (read_cache) {
   127     cookie = saved_args_layout();
   128     if (cookie != NULL)  return cookie;
   129   }
   130   oop target = saved_target();
   131   oop mtype  = java_lang_invoke_MethodHandle::type(target);
   132   oop mtform = java_lang_invoke_MethodType::form(mtype);
   133   cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
   134   if (write_cache)  {
   135     (*saved_args_layout_addr()) = cookie;
   136   }
   137   return cookie;
   138 }
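       // In Java terms the cookie computed above is roughly
       //   saved_target().type().form().vmlayout()
       // i.e. the invokeExact methodOop cached on the target's MethodTypeForm;
       // the write_cache path merely memoizes it in the frame's saved_args_layout slot.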
   140 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
   141                                                           // output params:
   142                                                           int* frame_size_in_words,
   143                                                           int* bounce_offset,
   144                                                           int* exception_offset) {
   145   (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
   147   address start = __ pc();
   149 #ifdef ASSERT
   150   __ hlt(); __ hlt(); __ hlt();
   151   // here's a hint of something special:
   152   __ push(MAGIC_NUMBER_1);
   153   __ push(MAGIC_NUMBER_2);
   154 #endif //ASSERT
   155   __ hlt();  // not reached
   157   // A return PC has just been popped from the stack.
   158   // Return values are in registers.
   159   // The ebp points into the RicochetFrame, which contains
   160   // a cleanup continuation we must return to.
   162   (*bounce_offset) = __ pc() - start;
   163   BLOCK_COMMENT("ricochet_blob.bounce");
   165   if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
   166   trace_method_handle(_masm, "ricochet_blob.bounce");
   168   __ jmp(frame_address(continuation_offset_in_bytes()));
   169   __ hlt();
   170   DEBUG_ONLY(__ push(MAGIC_NUMBER_2));
   172   (*exception_offset) = __ pc() - start;
   173   BLOCK_COMMENT("ricochet_blob.exception");
   175   // compare this to Interpreter::rethrow_exception_entry, which is parallel code
   176   // for example, see TemplateInterpreterGenerator::generate_throw_exception
   177   // Live registers in:
   178   //   rax: exception
   179   //   rdx: return address/pc that threw exception (ignored, always equal to bounce addr)
   180   __ verify_oop(rax);
   182   // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed
   184   // Take down the frame.
   186   // Cf. InterpreterMacroAssembler::remove_activation.
   187   leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg,
   188                        saved_last_sp_register(),
   189                        /*sender_pc_reg=*/ rdx);
   191   // In between activations - previous activation type unknown yet
   192   // compute continuation point - the continuation point expects the
   193   // following registers set up:
   194   //
   195   // rax: exception
   196   // rdx: return address/pc that threw exception
   197   // rsp: expression stack of caller
   198   // rbp: ebp of caller
   199   __ push(rax);                                  // save exception
   200   __ push(rdx);                                  // save return address
   201   Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi);
   202   NOT_LP64(__ get_thread(thread_reg));
   203   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
   204                                    SharedRuntime::exception_handler_for_return_address),
   205                   thread_reg, rdx);
   206   __ mov(rbx, rax);                              // save exception handler
   207   __ pop(rdx);                                   // restore return address
   208   __ pop(rax);                                   // restore exception
   209   __ jmp(rbx);                                   // jump to exception
   210                                                  // handler of caller
   211 }
   213 void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
   214                                                         Register rcx_recv,
   215                                                         Register rax_argv,
   216                                                         address return_handler,
   217                                                         Register rbx_temp) {
   218   const Register saved_last_sp = saved_last_sp_register();
   219   Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
   220   Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
   222   // Push the RicochetFrame a word at a time.
   223   // This creates something similar to an interpreter frame.
   224   // Cf. TemplateInterpreterGenerator::generate_fixed_frame.
   225   BLOCK_COMMENT("push RicochetFrame {");
   226   DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame));
   227   assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), "");
   228 #define RF_FIELD(push_value, name)                                      \
   229   { push_value;                                                         \
   230     assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); }
   231   RF_FIELD(__ push(rbp),                   sender_link);
   232   RF_FIELD(__ push(saved_last_sp),         exact_sender_sp);  // rsi/r13
   233   RF_FIELD(__ pushptr(rcx_amh_conversion), conversion);
   234   RF_FIELD(__ push(rax_argv),              saved_args_base);   // can be updated if args are shifted
   235   RF_FIELD(__ push((int32_t) NULL_WORD),   saved_args_layout); // cache for GC layout cookie
   236   if (UseCompressedOops) {
   237     __ load_heap_oop(rbx_temp, rcx_mh_vmtarget);
   238     RF_FIELD(__ push(rbx_temp),            saved_target);
   239   } else {
   240     RF_FIELD(__ pushptr(rcx_mh_vmtarget),  saved_target);
   241   }
   242   __ lea(rbx_temp, ExternalAddress(return_handler));
   243   RF_FIELD(__ push(rbx_temp),              continuation);
   244 #undef RF_FIELD
   245   assert(rfo == 0, "fully initialized the RicochetFrame");
   246   // compute new frame pointer:
   247   __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes()));
   248   // Push guard word #1 in debug mode.
   249   DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1));
   250   // For debugging, leave behind an indication of which stub built this frame.
   251   DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); });
   252   BLOCK_COMMENT("} RicochetFrame");
   253 }
   255 void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
   256                                                         Register rcx_recv,
   257                                                         Register new_sp_reg,
   258                                                         Register sender_pc_reg) {
   259   assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg);
   260   const Register saved_last_sp = saved_last_sp_register();
   261   // Take down the frame.
   262   // Cf. InterpreterMacroAssembler::remove_activation.
   263   BLOCK_COMMENT("end_ricochet_frame {");
   264   // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down.
   265   // This will keep stack in bounds even with unlimited tailcalls, each with an adapter.
   266   if (rcx_recv->is_valid())
   267     __ movptr(rcx_recv,    RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes()));
   268   __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes()));
   269   __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()));
   270   __ movptr(rbp,           RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes()));
   271   __ mov(rsp, new_sp_reg);
   272   BLOCK_COMMENT("} end_ricochet_frame");
   273 }
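       // RicochetFrame layout as laid down by enter_ricochet_frame, lowest address
       // first (a sketch; the RF_FIELD asserts tie each slot to *_offset_in_bytes()):
       //   continuation       cleanup handler the bounce path jumps through
       //   saved_target       the MethodHandle being adapted
       //   saved_args_layout  GC layout cookie, filled in lazily
       //   saved_args_base    may be updated if arguments are shifted
       //   conversion         copied from the AdapterMethodHandle
       //   exact_sender_sp    saved rsi/r13
       //   sender_link        caller's rbp; the new rbp points at this slot
       //   sender_pc          return address into the sender
       // In debug builds a MAGIC_NUMBER_1 guard word is pushed just below the frame.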
   275 // Emit code to verify that RBP is pointing at a valid ricochet frame.
   276 #ifdef ASSERT
   277 enum {
   278   ARG_LIMIT = 255, SLOP = 4,
   279   // use this parameter for checking for garbage stack movements:
   280   UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   281   // the slop defends against false alarms due to fencepost errors
   282 };
   284 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   285   // The stack should look like this:
   286   //    ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
   287   // Check various invariants.
   288   verify_offsets();
   290   Register rdi_temp = rdi;
   291   Register rcx_temp = rcx;
   292   { __ push(rdi_temp); __ push(rcx_temp); }
   293 #define UNPUSH_TEMPS \
   294   { __ pop(rcx_temp);  __ pop(rdi_temp); }
   296   Address magic_number_1_addr  = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes());
   297   Address magic_number_2_addr  = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes());
   298   Address continuation_addr    = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes());
   299   Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
   300   Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
   302   Label L_bad, L_ok;
   303   BLOCK_COMMENT("verify_clean {");
   304   // Magic numbers must check out:
   305   __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1);
   306   __ jcc(Assembler::notEqual, L_bad);
   307   __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2);
   308   __ jcc(Assembler::notEqual, L_bad);
   310   // Arguments pointer must look reasonable:
   311   __ movptr(rcx_temp, saved_args_base_addr);
   312   __ cmpptr(rcx_temp, rbp);
   313   __ jcc(Assembler::below, L_bad);
   314   __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize);
   315   __ cmpptr(rcx_temp, rbp);
   316   __ jcc(Assembler::above, L_bad);
   318   load_conversion_dest_type(_masm, rdi_temp, conversion_addr);
   319   __ cmpl(rdi_temp, T_VOID);
   320   __ jcc(Assembler::equal, L_ok);
   321   __ movptr(rcx_temp, saved_args_base_addr);
   322   load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
   323   __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()),
   324             (int32_t) RETURN_VALUE_PLACEHOLDER);
   325   __ jcc(Assembler::equal, L_ok);
   326   __ BIND(L_bad);
   327   UNPUSH_TEMPS;
   328   __ stop("damaged ricochet frame");
   329   __ BIND(L_ok);
   330   UNPUSH_TEMPS;
   331   BLOCK_COMMENT("} verify_clean");
   333 #undef UNPUSH_TEMPS
   335 }
   336 #endif //ASSERT
   338 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
   339   if (VerifyMethodHandles)
   340     verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(),
   341                  "AMH argument is a Class");
   342   __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
   343 }
   345 void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
   346   int bits   = BitsPerByte;
   347   int offset = (CONV_VMINFO_SHIFT / bits);
   348   int shift  = (CONV_VMINFO_SHIFT % bits);
   349   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
   350   assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
   351   assert(shift == 0, "no shift needed");
   352 }
   354 void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
   355   int bits   = BitsPerByte;
   356   int offset = (CONV_DEST_TYPE_SHIFT / bits);
   357   int shift  = (CONV_DEST_TYPE_SHIFT % bits);
   358   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
   359   assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
   360   __ shrl(reg, shift);
   361   DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
   362   assert((shift + conv_type_bits) == bits, "left justified in byte");
   363 }
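       // The two extractors above compute, in effect (plain-C sketch):
       //   vminfo    = (conversion >> CONV_VMINFO_SHIFT)    & CONV_VMINFO_MASK;
       //   dest_type = (conversion >> CONV_DEST_TYPE_SHIFT) & CONV_TYPE_MASK;
       // using one unsigned byte load plus, for dest_type, a final shift; the asserts
       // check that this matches the conversion-field layout.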
   365 void MethodHandles::load_stack_move(MacroAssembler* _masm,
   366                                     Register rdi_stack_move,
   367                                     Register rcx_amh,
   368                                     bool might_be_negative) {
   369   BLOCK_COMMENT("load_stack_move");
   370   Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
   371   __ movl(rdi_stack_move, rcx_amh_conversion);
   372   __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
   373 #ifdef _LP64
   374   if (might_be_negative) {
   375     // clean high bits of stack motion register (was loaded as an int)
   376     __ movslq(rdi_stack_move, rdi_stack_move);
   377   }
   378 #endif //_LP64
   379   if (VerifyMethodHandles) {
   380     Label L_ok, L_bad;
   381     int32_t stack_move_limit = 0x4000;  // extra-large
   382     __ cmpptr(rdi_stack_move, stack_move_limit);
   383     __ jcc(Assembler::greaterEqual, L_bad);
   384     __ cmpptr(rdi_stack_move, -stack_move_limit);
   385     __ jcc(Assembler::greater, L_ok);
   386     __ bind(L_bad);
   387     __ stop("load_stack_move of garbage value");
   388     __ BIND(L_ok);
   389   }
   390 }
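       // Equivalent plain-C sketch of the extraction above (the field is signed):
       //   stack_move = ((jint) conversion) >> CONV_STACK_MOVE_SHIFT;  // arithmetic shift
       // On LP64 the optional movslq re-widens the 32-bit result so it can be used
       // directly in pointer arithmetic.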
   392 #ifndef PRODUCT
   393 void MethodHandles::RicochetFrame::verify_offsets() {
   394   // Check compatibility of this struct with the more generally used offsets of class frame:
   395   int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
   396   assert(ebp_off + wordSize*frame::interpreter_frame_method_offset      == saved_args_base_offset_in_bytes(), "");
   397   assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset     == conversion_offset_in_bytes(), "");
   398   assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset   == exact_sender_sp_offset_in_bytes(), "");
   399   // These last two have to be exact:
   400   assert(ebp_off + wordSize*frame::link_offset                          == sender_link_offset_in_bytes(), "");
   401   assert(ebp_off + wordSize*frame::return_addr_offset                   == sender_pc_offset_in_bytes(), "");
   402 }
   404 void MethodHandles::RicochetFrame::verify() const {
   405   verify_offsets();
   406   assert(magic_number_1() == MAGIC_NUMBER_1, "");
   407   assert(magic_number_2() == MAGIC_NUMBER_2, "");
   408   if (!Universe::heap()->is_gc_active()) {
   409     if (saved_args_layout() != NULL) {
   410       assert(saved_args_layout()->is_method(), "must be valid oop");
   411     }
   412     if (saved_target() != NULL) {
   413       assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
   414     }
   415   }
   416   int conv_op = adapter_conversion_op(conversion());
   417   assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
   418          conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
   419          conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
   420          "must be a sane conversion");
   421   if (has_return_value_slot()) {
   422     assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
   423   }
   424 }
   425 #endif //PRODUCT
   427 #ifdef ASSERT
   428 void MethodHandles::verify_argslot(MacroAssembler* _masm,
   429                                    Register argslot_reg,
   430                                    const char* error_message) {
   431   // Verify that argslot lies within (rsp, rbp].
   432   Label L_ok, L_bad;
   433   BLOCK_COMMENT("verify_argslot {");
   434   __ cmpptr(argslot_reg, rbp);
   435   __ jccb(Assembler::above, L_bad);
   436   __ cmpptr(rsp, argslot_reg);
   437   __ jccb(Assembler::below, L_ok);
   438   __ bind(L_bad);
   439   __ stop(error_message);
   440   __ BIND(L_ok);
   441   BLOCK_COMMENT("} verify_argslot");
   442 }
   444 void MethodHandles::verify_argslots(MacroAssembler* _masm,
   445                                     RegisterOrConstant arg_slots,
   446                                     Register arg_slot_base_reg,
   447                                     bool negate_argslots,
   448                                     const char* error_message) {
   449   // Verify that [argslot..argslot+size) lies within (rsp, rbp).
   450   Label L_ok, L_bad;
   451   Register rdi_temp = rdi;
   452   BLOCK_COMMENT("verify_argslots {");
   453   __ push(rdi_temp);
   454   if (negate_argslots) {
   455     if (arg_slots.is_constant()) {
   456       arg_slots = -1 * arg_slots.as_constant();
   457     } else {
   458       __ movptr(rdi_temp, arg_slots);
   459       __ negptr(rdi_temp);
   460       arg_slots = rdi_temp;
   461     }
   462   }
   463   __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale()));
   464   __ cmpptr(rdi_temp, rbp);
   465   __ pop(rdi_temp);
   466   __ jcc(Assembler::above, L_bad);
   467   __ cmpptr(rsp, arg_slot_base_reg);
   468   __ jcc(Assembler::below, L_ok);
   469   __ bind(L_bad);
   470   __ stop(error_message);
   471   __ BIND(L_ok);
   472   BLOCK_COMMENT("} verify_argslots");
   473 }
   475 // Make sure that arg_slots has the same sign as the given direction.
   476 // If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
   477 void MethodHandles::verify_stack_move(MacroAssembler* _masm,
   478                                       RegisterOrConstant arg_slots, int direction) {
   479   bool allow_zero = arg_slots.is_constant();
   480   if (direction == 0) { direction = +1; allow_zero = true; }
   481   assert(stack_move_unit() == -1, "else add extra checks here");
   482   if (arg_slots.is_register()) {
   483     Label L_ok, L_bad;
   484     BLOCK_COMMENT("verify_stack_move {");
   485     // testl(arg_slots.as_register(), -stack_move_unit() - 1);  // no need
   486     // jcc(Assembler::notZero, L_bad);
   487     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
   488     if (direction > 0) {
   489       __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad);
   490       __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
   491       __ jcc(Assembler::less, L_ok);
   492     } else {
   493       __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad);
   494       __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
   495       __ jcc(Assembler::greater, L_ok);
   496     }
   497     __ bind(L_bad);
   498     if (direction > 0)
   499       __ stop("assert arg_slots > 0");
   500     else
   501       __ stop("assert arg_slots < 0");
   502     __ BIND(L_ok);
   503     BLOCK_COMMENT("} verify_stack_move");
   504   } else {
   505     intptr_t size = arg_slots.as_constant();
   506     if (direction < 0)  size = -size;
   507     assert(size >= 0, "correct direction of constant move");
   508     assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
   509   }
   510 }
   512 void MethodHandles::verify_klass(MacroAssembler* _masm,
   513                                  Register obj, KlassHandle klass,
   514                                  const char* error_message) {
   515   oop* klass_addr = klass.raw_value();
   516   assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
   517          klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
   518          "must be one of the SystemDictionaryHandles");
   519   Register temp = rdi;
   520   Label L_ok, L_bad;
   521   BLOCK_COMMENT("verify_klass {");
   522   __ verify_oop(obj);
   523   __ testptr(obj, obj);
   524   __ jcc(Assembler::zero, L_bad);
   525   __ push(temp);
   526   __ load_klass(temp, obj);
   527   __ cmpptr(temp, ExternalAddress((address) klass_addr));
   528   __ jcc(Assembler::equal, L_ok);
   529   intptr_t super_check_offset = klass->super_check_offset();
   530   __ movptr(temp, Address(temp, super_check_offset));
   531   __ cmpptr(temp, ExternalAddress((address) klass_addr));
   532   __ jcc(Assembler::equal, L_ok);
   533   __ pop(temp);
   534   __ bind(L_bad);
   535   __ stop(error_message);
   536   __ BIND(L_ok);
   537   __ pop(temp);
   538   BLOCK_COMMENT("} verify_klass");
   539 }
   540 #endif //ASSERT
   542 // Code generation
   543 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
   544   // rbx: methodOop
   545   // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
   546   // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
   547   // rdx, rdi: garbage temp, blown away
   549   Register rbx_method = rbx;
   550   Register rcx_recv   = rcx;
   551   Register rax_mtype  = rax;
   552   Register rdx_temp   = rdx;
   553   Register rdi_temp   = rdi;
   555   // emit WrongMethodType path first, to enable jccb back-branch from main path
   556   Label wrong_method_type;
   557   __ bind(wrong_method_type);
   558   Label invoke_generic_slow_path;
   559   assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
   560   __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
   561   __ jcc(Assembler::notEqual, invoke_generic_slow_path);
   562   __ push(rax_mtype);       // required mtype
   563   __ push(rcx_recv);        // bad mh (1st stacked argument)
   564   __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
   566   // here's where control starts out:
   567   __ align(CodeEntryAlignment);
   568   address entry_point = __ pc();
   570   // fetch the MethodType from the method handle into rax (the 'check' register)
   571   // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
   572   // This would simplify several touchy bits of code.
   573   // See 6984712: JSR 292 method handle calls need a clean argument base pointer
   574   {
   575     Register tem = rbx_method;
   576     for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
   577       __ movptr(rax_mtype, Address(tem, *pchase));
   578       tem = rax_mtype;          // in case there is another indirection
   579     }
   580   }
   582   // given the MethodType, find out where the MH argument is buried
   583   __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
   584   Register rdx_vmslots = rdx_temp;
   585   __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
   586   Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots);
   587   __ movptr(rcx_recv, mh_receiver_slot_addr);
   589   trace_method_handle(_masm, "invokeExact");
   591   __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
   593   // Nobody uses the MH receiver slot after this.  Make sure.
   594   DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999));
   596   __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
   598   // for invokeGeneric (only), apply argument and result conversions on the fly
   599   __ bind(invoke_generic_slow_path);
   600 #ifdef ASSERT
   601   if (VerifyMethodHandles) {
   602     Label L;
   603     __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
   604     __ jcc(Assembler::equal, L);
   605     __ stop("bad methodOop::intrinsic_id");
   606     __ bind(L);
   607   }
   608 #endif //ASSERT
   609   Register rbx_temp = rbx_method;  // don't need it now
   611   // make room on the stack for another pointer:
   612   Register rcx_argslot = rcx_recv;
   613   __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
   614   insert_arg_slots(_masm, 2 * stack_move_unit(),
   615                    rcx_argslot, rbx_temp, rdx_temp);
   617   // load up an adapter from the calling type (Java weaves this)
   618   __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
   619   Register rdx_adapter = rdx_temp;
   620   // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes()));
   621   // deal with old JDK versions:
   622   __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
   623   __ cmpptr(rdi_temp, rdx_temp);
   624   Label sorry_no_invoke_generic;
   625   __ jcc(Assembler::below, sorry_no_invoke_generic);
   627   __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0));
   628   __ testptr(rdx_adapter, rdx_adapter);
   629   __ jcc(Assembler::zero, sorry_no_invoke_generic);
   630   __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
   631   // As a trusted first argument, pass the type being called, so the adapter knows
   632   // the actual types of the arguments and return values.
   633   // (Generic invokers are shared among form-families of method-type.)
   634   __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
   635   // FIXME: assert that rdx_adapter is of the right method-type.
   636   __ mov(rcx, rdx_adapter);
   637   trace_method_handle(_masm, "invokeGeneric");
   638   __ jump_to_method_handle_entry(rcx, rdi_temp);
   640   __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
   641   __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
   642   __ push(rax_mtype);       // required mtype
   643   __ push(rcx_recv);        // bad mh (1st stacked argument)
   644   __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
   646   return entry_point;
   647 }
   649 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
   650 static RegisterOrConstant constant(int value) {
   651   return RegisterOrConstant(value);
   652 }
   654 // Helper to insert argument slots into the stack.
   655 // arg_slots must be a multiple of stack_move_unit() and < 0
   656 // rax_argslot is decremented to point to the new (shifted) location of the argslot
   657 // But, rdx_temp ends up holding the original value of rax_argslot.
   658 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
   659                                      RegisterOrConstant arg_slots,
   660                                      Register rax_argslot,
   661                                      Register rbx_temp, Register rdx_temp) {
   662   // allow constant zero
   663   if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
   664     return;
   665   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
   666                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
   667   if (VerifyMethodHandles)
   668     verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
   669   if (VerifyMethodHandles)
   670     verify_stack_move(_masm, arg_slots, -1);
   672   // Make space on the stack for the inserted argument(s).
   673   // Then pull down everything shallower than rax_argslot.
   674   // The stacked return address gets pulled down with everything else.
   675   // That is, copy [rsp, argslot) downward by -size words.  In pseudo-code:
   676   //   rsp -= size;
   677   //   for (rdx = rsp + size; rdx < argslot; rdx++)
   678   //     rdx[-size] = rdx[0]
   679   //   argslot -= size;
   680   BLOCK_COMMENT("insert_arg_slots {");
   681   __ mov(rdx_temp, rsp);                        // source pointer for copy
   682   __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
   683   {
   684     Label loop;
   685     __ BIND(loop);
   686     // pull one word down each time through the loop
   687     __ movptr(rbx_temp, Address(rdx_temp, 0));
   688     __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
   689     __ addptr(rdx_temp, wordSize);
   690     __ cmpptr(rdx_temp, rax_argslot);
   691     __ jcc(Assembler::less, loop);
   692   }
   694   // Now move the argslot down, to point to the opened-up space.
   695   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
   696   BLOCK_COMMENT("} insert_arg_slots");
   697 }
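       // Worked example (illustrative): with arg_slots = -2 the code above drops rsp by
       // two words, then copies every word in [old rsp, rax_argslot) -- including the
       // stacked return PC -- down by two words.  The two vacated slots end up at the
       // updated rax_argslot, i.e. just below the original insertion point.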
   699 // Helper to remove argument slots from the stack.
   700 // arg_slots must be a multiple of stack_move_unit() and > 0
   701 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
   702                                      RegisterOrConstant arg_slots,
   703                                      Register rax_argslot,
   704                                      Register rbx_temp, Register rdx_temp) {
   705   // allow constant zero
   706   if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
   707     return;
   708   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
   709                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
   710   if (VerifyMethodHandles)
   711     verify_argslots(_masm, arg_slots, rax_argslot, false,
   712                     "deleted argument(s) must fall within current frame");
   713   if (VerifyMethodHandles)
   714     verify_stack_move(_masm, arg_slots, +1);
   716   BLOCK_COMMENT("remove_arg_slots {");
   717   // Pull up everything shallower than rax_argslot.
   718   // Then remove the excess space on the stack.
   719   // The stacked return address gets pulled up with everything else.
   720   // That is, copy [rsp, argslot) upward by size words.  In pseudo-code:
   721   //   for (rdx = argslot-1; rdx >= rsp; --rdx)
   722   //     rdx[size] = rdx[0]
   723   //   argslot += size;
   724   //   rsp += size;
   725   __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
   726   {
   727     Label loop;
   728     __ BIND(loop);
   729     // pull one word up each time through the loop
   730     __ movptr(rbx_temp, Address(rdx_temp, 0));
   731     __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
   732     __ addptr(rdx_temp, -wordSize);
   733     __ cmpptr(rdx_temp, rsp);
   734     __ jcc(Assembler::greaterEqual, loop);
   735   }
   737   // Now move the argslot up, to point to the just-copied block.
   738   __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
   739   // And adjust the argslot address to point at the deletion point.
   740   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
   741   BLOCK_COMMENT("} remove_arg_slots");
   742 }
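       // Mirror image of insert_arg_slots (sketch): with arg_slots = +2 the words in
       // [rsp, rax_argslot) are copied up by two, then rsp and rax_argslot are both
       // raised by two words, closing the gap left by the deleted slots.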
   744 // Helper to copy argument slots to the top of the stack.
   745 // The sequence starts with rax_argslot and is counted by slot_count
   746 // slot_count must be a multiple of stack_move_unit() and >= 0
   747 // This function blows the temps but does not change rax_argslot.
   748 void MethodHandles::push_arg_slots(MacroAssembler* _masm,
   749                                    Register rax_argslot,
   750                                    RegisterOrConstant slot_count,
   751                                    int skip_words_count,
   752                                    Register rbx_temp, Register rdx_temp) {
   753   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
   754                              (!slot_count.is_register() ? rbp : slot_count.as_register()),
   755                              rsp);
   756   assert(Interpreter::stackElementSize == wordSize, "else change this code");
   758   if (VerifyMethodHandles)
   759     verify_stack_move(_masm, slot_count, 0);
   761   // allow constant zero
   762   if (slot_count.is_constant() && slot_count.as_constant() == 0)
   763     return;
   765   BLOCK_COMMENT("push_arg_slots {");
   767   Register rbx_top = rbx_temp;
   769   // There is at most 1 word to carry down with the TOS.
   770   switch (skip_words_count) {
   771   case 1: __ pop(rdx_temp); break;
   772   case 0:                   break;
   773   default: ShouldNotReachHere();
   774   }
   776   if (slot_count.is_constant()) {
   777     for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
   778       __ pushptr(Address(rax_argslot, i * wordSize));
   779     }
   780   } else {
   781     Label L_plural, L_loop, L_break;
   782     // Emit code to dynamically check for the common cases, zero and one slot.
   783     __ cmpl(slot_count.as_register(), (int32_t) 1);
   784     __ jccb(Assembler::greater, L_plural);
   785     __ jccb(Assembler::less, L_break);
   786     __ pushptr(Address(rax_argslot, 0));
   787     __ jmpb(L_break);
   788     __ BIND(L_plural);
   790     // Loop for 2 or more:
   791     //   rbx = &rax[slot_count]
   792     //   while (rbx > rax)  *(--rsp) = *(--rbx)
   793     __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr));
   794     __ BIND(L_loop);
   795     __ subptr(rbx_top, wordSize);
   796     __ pushptr(Address(rbx_top, 0));
   797     __ cmpptr(rbx_top, rax_argslot);
   798     __ jcc(Assembler::above, L_loop);
   799     __ bind(L_break);
   800   }
   801   switch (skip_words_count) {
   802   case 1: __ push(rdx_temp); break;
   803   case 0:                    break;
   804   default: ShouldNotReachHere();
   805   }
   806   BLOCK_COMMENT("} push_arg_slots");
   807 }
   809 // in-place movement; no change to rsp
   810 // blows rax_temp, rdx_temp
   811 void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
   812                                       Register rbx_bottom,  // invariant
   813                                       Address  top_addr,     // can use rax_temp
   814                                       RegisterOrConstant positive_distance_in_slots,
   815                                       Register rax_temp, Register rdx_temp) {
   816   BLOCK_COMMENT("move_arg_slots_up {");
   817   assert_different_registers(rbx_bottom,
   818                              rax_temp, rdx_temp,
   819                              positive_distance_in_slots.register_or_noreg());
   820   Label L_loop, L_break;
   821   Register rax_top = rax_temp;
   822   if (!top_addr.is_same_address(Address(rax_top, 0)))
   823     __ lea(rax_top, top_addr);
   824   // Detect empty (or broken) loop:
   825 #ifdef ASSERT
   826   if (VerifyMethodHandles) {
   827     // Verify that &bottom < &top (non-empty interval)
   828     Label L_ok, L_bad;
   829     if (positive_distance_in_slots.is_register()) {
   830       __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0);
   831       __ jcc(Assembler::lessEqual, L_bad);
   832     }
   833     __ cmpptr(rbx_bottom, rax_top);
   834     __ jcc(Assembler::below, L_ok);
   835     __ bind(L_bad);
   836     __ stop("valid bounds (copy up)");
   837     __ BIND(L_ok);
   838   }
   839 #endif
   840   __ cmpptr(rbx_bottom, rax_top);
   841   __ jccb(Assembler::aboveEqual, L_break);
   842   // work rax down to rbx, copying contiguous data upwards
   843   // In pseudo-code:
   844   //   [rbx, rax) = &[bottom, top)
   845   //   while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--;
   846   __ BIND(L_loop);
   847   __ subptr(rax_top, wordSize);
   848   __ movptr(rdx_temp, Address(rax_top, 0));
   849   __ movptr(          Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp);
   850   __ cmpptr(rax_top, rbx_bottom);
   851   __ jcc(Assembler::above, L_loop);
   852   assert(Interpreter::stackElementSize == wordSize, "else change loop");
   853   __ bind(L_break);
   854   BLOCK_COMMENT("} move_arg_slots_up");
   855 }
   857 // in-place movement; no change to rsp
   858 // blows rax_temp, rdx_temp
   859 void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
   860                                         Address  bottom_addr,  // can use rax_temp
   861                                         Register rbx_top,      // invariant
   862                                         RegisterOrConstant negative_distance_in_slots,
   863                                         Register rax_temp, Register rdx_temp) {
   864   BLOCK_COMMENT("move_arg_slots_down {");
   865   assert_different_registers(rbx_top,
   866                              negative_distance_in_slots.register_or_noreg(),
   867                              rax_temp, rdx_temp);
   868   Label L_loop, L_break;
   869   Register rax_bottom = rax_temp;
   870   if (!bottom_addr.is_same_address(Address(rax_bottom, 0)))
   871     __ lea(rax_bottom, bottom_addr);
   872   // Detect empty (or broken) loop:
   873 #ifdef ASSERT
   874   assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
   875   if (VerifyMethodHandles) {
   876     // Verify that &bottom < &top (non-empty interval)
   877     Label L_ok, L_bad;
   878     if (negative_distance_in_slots.is_register()) {
   879       __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0);
   880       __ jcc(Assembler::greaterEqual, L_bad);
   881     }
   882     __ cmpptr(rax_bottom, rbx_top);
   883     __ jcc(Assembler::below, L_ok);
   884     __ bind(L_bad);
   885     __ stop("valid bounds (copy down)");
   886     __ BIND(L_ok);
   887   }
   888 #endif
   889   __ cmpptr(rax_bottom, rbx_top);
   890   __ jccb(Assembler::aboveEqual, L_break);
   891   // work rax up to rbx, copying contiguous data downwards
   892   // In pseudo-code:
   893   //   [rax, rbx) = &[bottom, top)
   894   //   while (rax < rbx) *(rax - distance) = *(rax + 0), rax++;
   895   __ BIND(L_loop);
   896   __ movptr(rdx_temp, Address(rax_bottom, 0));
   897   __ movptr(          Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp);
   898   __ addptr(rax_bottom, wordSize);
   899   __ cmpptr(rax_bottom, rbx_top);
   900   __ jcc(Assembler::below, L_loop);
   901   assert(Interpreter::stackElementSize == wordSize, "else change loop");
   902   __ bind(L_break);
   903   BLOCK_COMMENT("} move_arg_slots_down");
   904 }
   906 // Copy from a field or array element to a stacked argument slot.
   907 // is_element (ignored) says whether caller is loading an array element instead of an instance field.
   908 void MethodHandles::move_typed_arg(MacroAssembler* _masm,
   909                                    BasicType type, bool is_element,
   910                                    Address slot_dest, Address value_src,
   911                                    Register rbx_temp, Register rdx_temp) {
   912   BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
   913   if (type == T_OBJECT || type == T_ARRAY) {
   914     __ load_heap_oop(rbx_temp, value_src);
   915     __ movptr(slot_dest, rbx_temp);
   916   } else if (type != T_VOID) {
   917     int  arg_size      = type2aelembytes(type);
   918     bool arg_is_signed = is_signed_subword_type(type);
   919     int  slot_size     = (arg_size > wordSize) ? arg_size : wordSize;
   920     __ load_sized_value(  rdx_temp,  value_src, arg_size, arg_is_signed, rbx_temp);
   921     __ store_sized_value( slot_dest, rdx_temp,  slot_size,               rbx_temp);
   922   }
   923   BLOCK_COMMENT("} move_typed_arg");
   924 }
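       // Example (illustrative): for a T_SHORT source, the value is read with
       // load_sized_value(2 bytes, sign-extended) and re-written with store_sized_value
       // into a full word-sized slot, so sub-word primitives are widened to a whole
       // word in their argument slot.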
   926 void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
   927                                       Address return_slot) {
   928   BLOCK_COMMENT("move_return_value {");
   929   // Old versions of the JVM must clean the FPU stack after every return.
   930 #ifndef _LP64
   931 #ifdef COMPILER2
   932   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
   933   if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) {
   934     for (int i = 1; i < 8; i++) {
   935         __ ffree(i);
   936     }
   937   } else if (UseSSE < 2) {
   938     __ empty_FPU_stack();
   939   }
   940 #endif //COMPILER2
   941 #endif //!_LP64
   943   // Look at the type and pull the value out of the corresponding register.
   944   if (type == T_VOID) {
   945     // nothing to do
   946   } else if (type == T_OBJECT) {
   947     __ movptr(return_slot, rax);
   948   } else if (type == T_INT || is_subword_type(type)) {
   949     // write the whole word, even if only 32 bits is significant
   950     __ movptr(return_slot, rax);
   951   } else if (type == T_LONG) {
   952     // store the value by parts
   953     // Note: We assume longs are contiguous (even if misaligned) on the interpreter stack.
   954     __ store_sized_value(return_slot, rax, BytesPerLong, rdx);
   955   } else if (NOT_LP64((type == T_FLOAT  && UseSSE < 1) ||
   956                       (type == T_DOUBLE && UseSSE < 2) ||)
   957              false) {
   958     // Use old x86 FPU registers:
   959     if (type == T_FLOAT)
   960       __ fstp_s(return_slot);
   961     else
   962       __ fstp_d(return_slot);
   963   } else if (type == T_FLOAT) {
   964     __ movflt(return_slot, xmm0);
   965   } else if (type == T_DOUBLE) {
   966     __ movdbl(return_slot, xmm0);
   967   } else {
   968     ShouldNotReachHere();
   969   }
   970   BLOCK_COMMENT("} move_return_value");
   971 }
   974 #ifndef PRODUCT
   975 extern "C" void print_method_handle(oop mh);
   976 void trace_method_handle_stub(const char* adaptername,
   977                               oop mh,
   978                               intptr_t* saved_regs,
   979                               intptr_t* entry_sp,
   980                               intptr_t* saved_sp,
   981                               intptr_t* saved_bp) {
   982   // called as a leaf from native code: do not block the JVM!
   983   intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
   984   intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
   985   tty->print_cr("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT,
   986                 adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   987   if (last_sp != saved_sp && last_sp != NULL)
   988     tty->print_cr("*** last_sp="INTPTR_FORMAT, (intptr_t)last_sp);
   989   if (Verbose) {
   990     tty->print(" reg dump: ");
   991     int saved_regs_count = (entry_sp-1) - saved_regs;
   992     // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
   993     int i;
   994     for (i = 0; i <= saved_regs_count; i++) {
   995       if (i > 0 && i % 4 == 0 && i != saved_regs_count) {
   996         tty->cr();
   997         tty->print("   + dump: ");
   998       }
   999       tty->print(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
  1000     }
  1001     tty->cr();
  1002     int stack_dump_count = 16;
  1003     if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
  1004       stack_dump_count = (int)(saved_bp + 2 - saved_sp);
  1005     if (stack_dump_count > 64)  stack_dump_count = 48;
  1006     for (i = 0; i < stack_dump_count; i += 4) {
  1007       tty->print_cr(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT,
  1008                     i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
  1009     }
  1010     print_method_handle(mh);
  1011   }
  1012 }
  1014 // The stub wraps the arguments in a struct on the stack to avoid
  1015 // dealing with the different calling conventions for passing 6
  1016 // arguments.
  1017 struct MethodHandleStubArguments {
  1018   const char* adaptername;
  1019   oopDesc* mh;
  1020   intptr_t* saved_regs;
  1021   intptr_t* entry_sp;
  1022   intptr_t* saved_sp;
  1023   intptr_t* saved_bp;
  1024 };
  1025 void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  1026   trace_method_handle_stub(args->adaptername,
  1027                            args->mh,
  1028                            args->saved_regs,
  1029                            args->entry_sp,
  1030                            args->saved_sp,
  1031                            args->saved_bp);
  1032 }
  1034 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  1035   if (!TraceMethodHandles)  return;
  1036   BLOCK_COMMENT("trace_method_handle {");
  1037   __ push(rax);
  1038   __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp  __ pusha();
  1039   __ pusha();
  1040   __ mov(rbx, rsp);
  1041   __ enter();
  1042   // incoming state:
  1043   // rcx: method handle
  1044   // r13 or rsi: saved sp
  1045   // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
  1046   __ push(rbp);               // saved_bp
  1047   __ push(rsi);               // saved_sp
  1048   __ push(rax);               // entry_sp
  1049   __ push(rbx);               // pusha saved_regs
  1050   __ push(rcx);               // mh
  1051   __ push(rcx);               // adaptername
  1052   __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  1053   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  1054   __ leave();
  1055   __ popa();
  1056   __ pop(rax);
  1057   BLOCK_COMMENT("} trace_method_handle");
  1058 }
  1059 #endif //PRODUCT
  1061 // which conversion op types are implemented here?
  1062 int MethodHandles::adapter_conversion_ops_supported_mask() {
  1063   return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
  1064          |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
  1065          |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
  1066          |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
  1067          |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
  1068           //OP_PRIM_TO_REF is below...
  1069          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
  1070          |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
  1071          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
  1072          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
  1073           //OP_COLLECT_ARGS is below...
  1074          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
  1075          |(!UseRicochetFrames ? 0 :
  1076            LP64_ONLY(FLAG_IS_DEFAULT(UseRicochetFrames) ? 0 :)
  1077            java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
  1078            ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
  1079            |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
  1080            |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
  1081             ))
  1082          );
  1083 }
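       // The value above is a bitmask with one bit per AdapterMethodHandle conversion
       // op; a caller-side test would look like (sketch only):
       //   bool supported = ((adapter_conversion_ops_supported_mask() >> conv_op) & 1) != 0;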
  1085 //------------------------------------------------------------------------------
  1086 // MethodHandles::generate_method_handle_stub
  1087 //
  1088 // Generate an "entry" field for a method handle.
  1089 // This determines how the method handle will respond to calls.
  1090 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  1091   MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
  1093   // Here is the register state during an interpreted call,
  1094   // as set up by generate_method_handle_interpreter_entry():
  1095   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
  1096   // - rcx: receiver method handle
  1097   // - rax: method handle type (only used by the check_mtype entry point)
  1098   // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  1099   // - rdx: garbage temp, can blow away
  1101   const Register rcx_recv    = rcx;
  1102   const Register rax_argslot = rax;
  1103   const Register rbx_temp    = rbx;
  1104   const Register rdx_temp    = rdx;
  1105   const Register rdi_temp    = rdi;
  1107   // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
  1108   // and gen_c2i_adapter (from compiled calls):
  1109   const Register saved_last_sp = saved_last_sp_register();
  1111   // Argument registers for _raise_exception.
  1112   // 32-bit: Pass first two oop/int args in registers ECX and EDX.
  1113   const Register rarg0_code     = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  1114   const Register rarg1_actual   = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
  1115   const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
  1116   assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);
  1118   guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
  1120   // some handy addresses
  1121   Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
  1122   Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );
  1124   Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
  1125   Address rcx_dmh_vmindex(    rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
  1127   Address rcx_bmh_vmargslot(  rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() );
  1128   Address rcx_bmh_argument(   rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() );
  1130   Address rcx_amh_vmargslot(  rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() );
  1131   Address rcx_amh_argument(   rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() );
  1132   Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
  1133   Address vmarg;                // __ argument_address(vmargslot)
  1135   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
  1137   if (have_entry(ek)) {
  1138     __ nop();                   // empty stubs make SG sick
  1139     return;
  1140   }
  1142 #ifdef ASSERT
  1143   __ push((int32_t) 0xEEEEEEEE);
  1144   __ push((int32_t) (intptr_t) entry_name(ek));
  1145   LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
  1146   __ push((int32_t) 0x33333333);
  1147 #endif //ASSERT
  1149   address interp_entry = __ pc();
  1151   trace_method_handle(_masm, entry_name(ek));
  1153   BLOCK_COMMENT(entry_name(ek));
  1155   switch ((int) ek) {
  1156   case _raise_exception:
  1157     {
  1158       // Not a real MH entry, but rather shared code for raising an
  1159       // exception.  Since we use the compiled entry, arguments are
  1160       // expected in compiler argument registers.
  1161       assert(raise_exception_method(), "must be set");
  1162       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
  1164       const Register rdi_pc = rax;  // (note: despite the name, this temp is actually rax)
  1165       __ pop(rdi_pc);  // caller PC
  1166       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
  1168       Register rbx_method = rbx_temp;
  1169       Label L_no_method;
  1170       // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
  1171       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
  1172       __ testptr(rbx_method, rbx_method);
  1173       __ jccb(Assembler::zero, L_no_method);
  1175       const int jobject_oop_offset = 0;
  1176       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
  1177       __ testptr(rbx_method, rbx_method);
  1178       __ jccb(Assembler::zero, L_no_method);
  1179       __ verify_oop(rbx_method);
  1181       NOT_LP64(__ push(rarg2_required));
  1182       __ push(rdi_pc);         // restore caller PC
  1183       __ jmp(rbx_method_fce);  // jump to compiled entry
  1185       // Do something that at least causes a valid throw from the interpreter.
  1186       __ bind(L_no_method);
  1187       __ push(rarg2_required);
  1188       __ push(rarg1_actual);
  1189       __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
  1191     break;
  1193   case _invokestatic_mh:
  1194   case _invokespecial_mh:
  1196       Register rbx_method = rbx_temp;
  1197       __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
  1198       __ verify_oop(rbx_method);
  1199       // same as TemplateTable::invokestatic or invokespecial,
  1200       // minus the CP setup and profiling:
  1201       if (ek == _invokespecial_mh) {
  1202         // Must load & check the first argument before entering the target method.
  1203         __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
  1204         __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
  1205         __ null_check(rcx_recv);
  1206         __ verify_oop(rcx_recv);
  1208       __ jmp(rbx_method_fie);
  1210     break;
  1212   case _invokevirtual_mh:
  1214       // same as TemplateTable::invokevirtual,
  1215       // minus the CP setup and profiling:
  1217       // pick out the vtable index and receiver offset from the MH,
  1218       // and then we can discard it:
  1219       __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
  1220       Register rbx_index = rbx_temp;
  1221       __ movl(rbx_index, rcx_dmh_vmindex);
  1222       // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
  1223       __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
  1224       __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
  1226       // get receiver klass
  1227       Register rax_klass = rax_argslot;
  1228       __ load_klass(rax_klass, rcx_recv);
  1229       __ verify_oop(rax_klass);
  1231       // get target methodOop & entry point
  1232       const int base = instanceKlass::vtable_start_offset() * wordSize;
  1233       assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  1234       Address vtable_entry_addr(rax_klass,
  1235                                 rbx_index, Address::times_ptr,
  1236                                 base + vtableEntry::method_offset_in_bytes());
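             // Roughly:  rbx_method = *(methodOop*)(rax_klass + base
             //                                      + rbx_index * wordSize
             //                                      + vtableEntry::method_offset_in_bytes())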
  1237       Register rbx_method = rbx_temp;
  1238       __ movptr(rbx_method, vtable_entry_addr);
  1240       __ verify_oop(rbx_method);
  1241       __ jmp(rbx_method_fie);
  1243     break;
  1245   case _invokeinterface_mh:
  1247       // same as TemplateTable::invokeinterface,
  1248       // minus the CP setup and profiling:
  1250       // pick out the interface and itable index from the MH.
  1251       __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
  1252       Register rdx_intf  = rdx_temp;
  1253       Register rbx_index = rbx_temp;
  1254       __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
  1255       __ movl(rbx_index, rcx_dmh_vmindex);
  1256       __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
  1257       __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
  1259       // get receiver klass
  1260       Register rax_klass = rax_argslot;
  1261       __ load_klass(rax_klass, rcx_recv);
  1262       __ verify_oop(rax_klass);
  1264       Register rbx_method = rbx_index;
  1266       // get interface klass
  1267       Label no_such_interface;
  1268       __ verify_oop(rdx_intf);
  1269       __ lookup_interface_method(rax_klass, rdx_intf,
  1270                                  // note: next two args must be the same:
  1271                                  rbx_index, rbx_method,
  1272                                  rdi_temp,
  1273                                  no_such_interface);
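             // (Sketch: lookup_interface_method scans rax_klass's itable for rdx_intf;
             //  on a hit it loads the methodOop for slot rbx_index into rbx_method,
             //  otherwise it branches to no_such_interface.)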
  1275       __ verify_oop(rbx_method);
  1276       __ jmp(rbx_method_fie);
  1277       __ hlt();
  1279       __ bind(no_such_interface);
  1280       // Throw an exception.
  1281       // For historical reasons, it will be IncompatibleClassChangeError.
  1282       __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
  1283       assert_different_registers(rarg2_required, rbx_temp);
  1284       __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
  1285       __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
  1286       __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
  1287       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
  1289     break;
  1291   case _bound_ref_mh:
  1292   case _bound_int_mh:
  1293   case _bound_long_mh:
  1294   case _bound_ref_direct_mh:
  1295   case _bound_int_direct_mh:
  1296   case _bound_long_direct_mh:
  1298       bool direct_to_method = (ek >= _bound_ref_direct_mh);
  1299       BasicType arg_type  = ek_bound_mh_arg_type(ek);
  1300       int       arg_slots = type2size[arg_type];
  1302       // make room for the new argument:
  1303       __ movl(rax_argslot, rcx_bmh_vmargslot);
  1304       __ lea(rax_argslot, __ argument_address(rax_argslot));
  1306       insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
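             // (Sketch of insert_arg_slots: every slot below vmargslot slides down
             //  by arg_slots words, opening a gap at rax_argslot into which the
             //  bound value is stored below.)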
  1308       // store bound argument into the new stack slot:
  1309       __ load_heap_oop(rbx_temp, rcx_bmh_argument);
  1310       if (arg_type == T_OBJECT) {
  1311         __ movptr(Address(rax_argslot, 0), rbx_temp);
  1312       } else {
  1313         Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
  1314         move_typed_arg(_masm, arg_type, false,
  1315                        Address(rax_argslot, 0),
  1316                        prim_value_addr,
  1317                        rbx_temp, rdx_temp);
  1320       if (direct_to_method) {
  1321         Register rbx_method = rbx_temp;
  1322         __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
  1323         __ verify_oop(rbx_method);
  1324         __ jmp(rbx_method_fie);
  1325       } else {
  1326         __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1327         __ verify_oop(rcx_recv);
  1328         __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1331     break;
  1333   case _adapter_retype_only:
  1334   case _adapter_retype_raw:
  1335     // immediately jump to the next MH layer:
  1336     __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1337     __ verify_oop(rcx_recv);
  1338     __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1339     // This is OK when all parameter types widen.
  1340     // It is also OK when a return type narrows.
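           // (Example, as a sketch: an adapter typed (String)->Object may jump
           //  straight to a target typed (Object)->String; each parameter type
           //  widens and the return type narrows, so no per-argument work is needed.)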
  1341     break;
  1343   case _adapter_check_cast:
  1345       // temps:
  1346       Register rbx_klass = rbx_temp; // interesting AMH data
  1348       // check a reference argument before jumping to the next layer of MH:
  1349       __ movl(rax_argslot, rcx_amh_vmargslot);
  1350       vmarg = __ argument_address(rax_argslot);
  1352       // What class are we casting to?
  1353       __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
  1354       load_klass_from_Class(_masm, rbx_klass);
  1356       Label done;
  1357       __ movptr(rdx_temp, vmarg);
  1358       __ testptr(rdx_temp, rdx_temp);
  1359       __ jcc(Assembler::zero, done);         // no cast if null
  1360       __ load_klass(rdx_temp, rdx_temp);
  1362       // live at this point:
  1363       // - rbx_klass:  klass required by the target method
  1364       // - rdx_temp:   argument klass to test
  1365       // - rcx_recv:   adapter method handle
  1366       __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);
  1368       // If we get here, the type check failed!
  1369       // Call the wrong_method_type stub, passing the failing argument type in rax.
  1370       Register rax_mtype = rax_argslot;
  1371       __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
  1372       __ movptr(rdx_temp, vmarg);
  1374       assert_different_registers(rarg2_required, rdx_temp);
  1375       __ load_heap_oop(rarg2_required, rcx_amh_argument);             // required class
  1376       __ mov(          rarg1_actual,   rdx_temp);                     // bad object
  1377       __ movl(         rarg0_code,     (int) Bytecodes::_checkcast);  // who is complaining?
  1378       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
  1380       __ bind(done);
  1381       // get the new MH:
  1382       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1383       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1385     break;
  1387   case _adapter_prim_to_prim:
  1388   case _adapter_ref_to_prim:
  1389   case _adapter_prim_to_ref:
  1390     // handled completely by optimized cases
  1391     __ stop("init_AdapterMethodHandle should not issue this");
  1392     break;
  1394   case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
  1395 //case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  1396   case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  1397   case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
  1399       // perform an in-place conversion to int or an int subword
  1400       __ movl(rax_argslot, rcx_amh_vmargslot);
  1401       vmarg = __ argument_address(rax_argslot);
  1403       switch (ek) {
  1404       case _adapter_opt_i2i:
  1405         __ movl(rdx_temp, vmarg);
  1406         break;
  1407       case _adapter_opt_l2i:
  1409           // just delete the extra slot; on a little-endian machine we keep the first
  1410           __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
  1411           remove_arg_slots(_masm, -stack_move_unit(),
  1412                            rax_argslot, rbx_temp, rdx_temp);
  1413           vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
  1414           __ movl(rdx_temp, vmarg);
  1416         break;
  1417       case _adapter_opt_unboxi:
  1419           // Load the value up from the heap.
  1420           __ movptr(rdx_temp, vmarg);
  1421           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
  1422 #ifdef ASSERT
  1423           for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  1424             if (is_subword_type(BasicType(bt)))
  1425               assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
  1427 #endif
  1428           __ null_check(rdx_temp, value_offset);
  1429           __ movl(rdx_temp, Address(rdx_temp, value_offset));
  1430           // We load this as a word.  Because we are little-endian,
  1431           // the low bits will be correct, but the high bits may need cleaning.
  1432           // The vminfo will guide us to clean those bits.
  1434         break;
  1435       default:
  1436         ShouldNotReachHere();
  1439       // Do the requested conversion and store the value.
  1440       Register rbx_vminfo = rbx_temp;
  1441       load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
  1443       // get the new MH:
  1444       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1445       // (now we are done with the old MH)
  1447       // original 32-bit vmdata word must be of this form:
  1448       //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
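             // pseudo-code for the clean-up below (count is taken from vminfo, via CL):
             //   x <<= count;
             //   x  = sign ? (x >> count)    // arithmetic: int -> byte/short
             //             : (x >>> count);  // logical:    int -> char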
  1449       __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
  1450       __ shll(rdx_temp /*, rcx*/);
  1451       Label zero_extend, done;
  1452       __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
  1453       __ jccb(Assembler::zero, zero_extend);
  1455       // this path is taken for int->byte, int->short
  1456       __ sarl(rdx_temp /*, rcx*/);
  1457       __ jmpb(done);
  1459       __ bind(zero_extend);
  1460       // this is taken for int->char
  1461       __ shrl(rdx_temp /*, rcx*/);
  1463       __ bind(done);
  1464       __ movl(vmarg, rdx_temp);  // Store the value.
  1465       __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv
  1467       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1469     break;
  1471   case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  1472   case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
  1474       // perform an in-place int-to-long or ref-to-long conversion
  1475       __ movl(rax_argslot, rcx_amh_vmargslot);
  1477       // on a little-endian machine we keep the first slot and add another after
  1478       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
  1479       insert_arg_slots(_masm, stack_move_unit(),
  1480                        rax_argslot, rbx_temp, rdx_temp);
  1481       Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
  1482       Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
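             // (Sketch of the layout after the insertion above: vmarg1 keeps the
             //  low word -- the original int or the unboxed low half -- and vmarg2,
             //  the freshly inserted slot, receives the high/sign word; on LP64 the
             //  widened value is simply stored into vmarg1 as one 64-bit slot.)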
  1484       switch (ek) {
  1485       case _adapter_opt_i2l:
  1487 #ifdef _LP64
  1488           __ movslq(rdx_temp, vmarg1);  // Load sign-extended
  1489           __ movq(vmarg1, rdx_temp);    // Store into first slot
  1490 #else
  1491           __ movl(rdx_temp, vmarg1);
  1492           __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
  1493           __ movl(vmarg2, rdx_temp); // store second word
  1494 #endif
  1496         break;
  1497       case _adapter_opt_unboxl:
  1499           // Load the value up from the heap.
  1500           __ movptr(rdx_temp, vmarg1);
  1501           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
  1502           assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
  1503           __ null_check(rdx_temp, value_offset);
  1504 #ifdef _LP64
  1505           __ movq(rbx_temp, Address(rdx_temp, value_offset));
  1506           __ movq(vmarg1, rbx_temp);
  1507 #else
  1508           __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
  1509           __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
  1510           __ movl(vmarg1, rbx_temp);
  1511           __ movl(vmarg2, rdx_temp);
  1512 #endif
  1514         break;
  1515       default:
  1516         ShouldNotReachHere();
  1519       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1520       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1522     break;
  1524   case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  1525   case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
  1527       // perform an in-place floating primitive conversion
  1528       __ movl(rax_argslot, rcx_amh_vmargslot);
  1529       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
  1530       if (ek == _adapter_opt_f2d) {
  1531         insert_arg_slots(_masm, stack_move_unit(),
  1532                          rax_argslot, rbx_temp, rdx_temp);
  1534       Address vmarg(rax_argslot, -Interpreter::stackElementSize);
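             // (Net effect, roughly: _adapter_opt_f2d widens the argument by one
             //  stack slot and _adapter_opt_d2f narrows it by one; the value itself
             //  is converted in place below, via xmm0 on LP64 or the x87 stack otherwise.)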
  1536 #ifdef _LP64
  1537       if (ek == _adapter_opt_f2d) {
  1538         __ movflt(xmm0, vmarg);
  1539         __ cvtss2sd(xmm0, xmm0);
  1540         __ movdbl(vmarg, xmm0);
  1541       } else {
  1542         __ movdbl(xmm0, vmarg);
  1543         __ cvtsd2ss(xmm0, xmm0);
  1544         __ movflt(vmarg, xmm0);
  1546 #else //_LP64
  1547       if (ek == _adapter_opt_f2d) {
  1548         __ fld_s(vmarg);        // load float to ST0
  1549         __ fstp_d(vmarg);       // store as double (widened in place)
  1550       } else {
  1551         __ fld_d(vmarg);        // load double to ST0
  1552         __ fstp_s(vmarg);       // store single
  1554 #endif //_LP64
  1556       if (ek == _adapter_opt_d2f) {
  1557         remove_arg_slots(_masm, -stack_move_unit(),
  1558                          rax_argslot, rbx_temp, rdx_temp);
  1561       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1562       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1564     break;
  1566   case _adapter_swap_args:
  1567   case _adapter_rot_args:
  1568     // handled completely by optimized cases
  1569     __ stop("init_AdapterMethodHandle should not issue this");
  1570     break;
  1572   case _adapter_opt_swap_1:
  1573   case _adapter_opt_swap_2:
  1574   case _adapter_opt_rot_1_up:
  1575   case _adapter_opt_rot_1_down:
  1576   case _adapter_opt_rot_2_up:
  1577   case _adapter_opt_rot_2_down:
  1579       int swap_slots = ek_adapter_opt_swap_slots(ek);
  1580       int rotate     = ek_adapter_opt_swap_mode(ek);
  1582       // 'argslot' is the position of the first argument to swap
  1583       __ movl(rax_argslot, rcx_amh_vmargslot);
  1584       __ lea(rax_argslot, __ argument_address(rax_argslot));
  1586       // 'vminfo' is the second
  1587       Register rbx_destslot = rbx_temp;
  1588       load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion);
  1589       __ lea(rbx_destslot, __ argument_address(rbx_destslot));
  1590       if (VerifyMethodHandles)
  1591         verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame");
  1593       assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
  1594       if (!rotate) {
  1595         // simple swap
  1596         for (int i = 0; i < swap_slots; i++) {
  1597           __ movptr(rdi_temp, Address(rax_argslot,  i * wordSize));
  1598           __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize));
  1599           __ movptr(Address(rax_argslot,  i * wordSize), rdx_temp);
  1600           __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
  1602       } else {
  1603         // A rotate is actually a pair of moves, with an "odd slot" (or pair)
  1604         // changing place with a series of other slots.
  1605         // First, push the "odd slot", which is going to get overwritten
  1606         for (int i = swap_slots - 1; i >= 0; i--) {
  1607           // handle one with rdi_temp instead of a push:
  1608           if (i == 0)  __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
  1609           else         __ pushptr(         Address(rax_argslot, i * wordSize));
  1611         if (rotate > 0) {
  1612           // Here is rotate > 0:
  1613           // (low mem)                                          (high mem)
  1614           //     | dest:     more_slots...     | arg: odd_slot :arg+1 |
  1615           // =>
  1616           //     | dest: odd_slot | dest+1: more_slots...      :arg+1 |
  1617           // work argslot down to destslot, copying contiguous data upwards
  1618           // pseudo-code:
  1619           //   rax = src_addr - swap_bytes
  1620           //   rbx = dest_addr
  1621           //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
  1622           move_arg_slots_up(_masm,
  1623                             rbx_destslot,
  1624                             Address(rax_argslot, 0),
  1625                             swap_slots,
  1626                             rax_argslot, rdx_temp);
  1627         } else {
  1628           // Here is the other direction, rotate < 0:
  1629           // (low mem)                                          (high mem)
  1630           //     | arg: odd_slot | arg+1: more_slots...       :dest+1 |
  1631           // =>
  1632           //     | arg:    more_slots...     | dest: odd_slot :dest+1 |
  1633           // work argslot up to destslot, copying contiguous data downwards
  1634           // pseudo-code:
  1635           //   rax = src_addr + swap_bytes
  1636           //   rbx = dest_addr
  1637           //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
  1638           __ addptr(rbx_destslot, wordSize);
  1639           move_arg_slots_down(_masm,
  1640                               Address(rax_argslot, swap_slots * wordSize),
  1641                               rbx_destslot,
  1642                               -swap_slots,
  1643                               rax_argslot, rdx_temp);
  1645           __ subptr(rbx_destslot, wordSize);
  1647         // pop the original first chunk into the destination slot, now free
  1648         for (int i = 0; i < swap_slots; i++) {
  1649           if (i == 0)  __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
  1650           else         __ popptr(Address(rbx_destslot, i * wordSize));
  1654       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1655       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1657     break;
  1659   case _adapter_dup_args:
  1661       // 'argslot' is the position of the first argument to duplicate
  1662       __ movl(rax_argslot, rcx_amh_vmargslot);
  1663       __ lea(rax_argslot, __ argument_address(rax_argslot));
  1665       // 'stack_move' is the (negative) number of words to duplicate
  1666       Register rdi_stack_move = rdi_temp;
  1667       load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
  1669       if (VerifyMethodHandles) {
  1670         verify_argslots(_masm, rdi_stack_move, rax_argslot, true,
  1671                         "copied argument(s) must fall within current frame");
  1674       // insert location is always the bottom of the argument list:
  1675       Address insert_location = __ argument_address(constant(0));
  1676       int pre_arg_words = insert_location.disp() / wordSize;   // return PC is pushed
  1677       assert(insert_location.base() == rsp, "");
  1679       __ negl(rdi_stack_move);
  1680       push_arg_slots(_masm, rax_argslot, rdi_stack_move,
  1681                      pre_arg_words, rbx_temp, rdx_temp);
  1683       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1684       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1686     break;
  1688   case _adapter_drop_args:
  1690       // 'argslot' is the position of the first argument to nuke
  1691       __ movl(rax_argslot, rcx_amh_vmargslot);
  1692       __ lea(rax_argslot, __ argument_address(rax_argslot));
  1694       // (must do previous push after argslot address is taken)
  1696       // 'stack_move' is the number of words to drop
  1697       Register rdi_stack_move = rdi_temp;
  1698       load_stack_move(_masm, rdi_stack_move, rcx_recv, false);
  1699       remove_arg_slots(_masm, rdi_stack_move,
  1700                        rax_argslot, rbx_temp, rdx_temp);
  1702       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  1703       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  1705     break;
  1707   case _adapter_collect_args:
  1708   case _adapter_fold_args:
  1709   case _adapter_spread_args:
  1710     // handled completely by optimized cases
  1711     __ stop("init_AdapterMethodHandle should not issue this");
  1712     break;
  1714   case _adapter_opt_collect_ref:
  1715   case _adapter_opt_collect_int:
  1716   case _adapter_opt_collect_long:
  1717   case _adapter_opt_collect_float:
  1718   case _adapter_opt_collect_double:
  1719   case _adapter_opt_collect_void:
  1720   case _adapter_opt_collect_0_ref:
  1721   case _adapter_opt_collect_1_ref:
  1722   case _adapter_opt_collect_2_ref:
  1723   case _adapter_opt_collect_3_ref:
  1724   case _adapter_opt_collect_4_ref:
  1725   case _adapter_opt_collect_5_ref:
  1726   case _adapter_opt_filter_S0_ref:
  1727   case _adapter_opt_filter_S1_ref:
  1728   case _adapter_opt_filter_S2_ref:
  1729   case _adapter_opt_filter_S3_ref:
  1730   case _adapter_opt_filter_S4_ref:
  1731   case _adapter_opt_filter_S5_ref:
  1732   case _adapter_opt_collect_2_S0_ref:
  1733   case _adapter_opt_collect_2_S1_ref:
  1734   case _adapter_opt_collect_2_S2_ref:
  1735   case _adapter_opt_collect_2_S3_ref:
  1736   case _adapter_opt_collect_2_S4_ref:
  1737   case _adapter_opt_collect_2_S5_ref:
  1738   case _adapter_opt_fold_ref:
  1739   case _adapter_opt_fold_int:
  1740   case _adapter_opt_fold_long:
  1741   case _adapter_opt_fold_float:
  1742   case _adapter_opt_fold_double:
  1743   case _adapter_opt_fold_void:
  1744   case _adapter_opt_fold_1_ref:
  1745   case _adapter_opt_fold_2_ref:
  1746   case _adapter_opt_fold_3_ref:
  1747   case _adapter_opt_fold_4_ref:
  1748   case _adapter_opt_fold_5_ref:
  1750       // Given a fresh incoming stack frame, build a new ricochet frame.
  1751       // On entry, TOS points at a return PC, and RBP is the caller's frame ptr.
  1752       // RSI/R13 has the caller's exact stack pointer, which we must also preserve.
  1753       // RCX contains an AdapterMethodHandle of the indicated kind.
  1755       // Relevant AMH fields:
  1756       // amh.vmargslot:
  1757       //   points to the trailing edge of the arguments
  1758       //   to filter, collect, or fold.  For a boxing operation,
  1759       //   it points just after the single primitive value.
  1760       // amh.argument:
  1761       //   recursively called MH, on |collect| arguments
  1762       // amh.vmtarget:
  1763       //   final destination MH, on return value, etc.
  1764       // amh.conversion.dest:
  1765       //   gives the type of the return value
  1766       //   (not needed here, since dest is also derived from ek)
  1767       // amh.conversion.vminfo:
  1768       //   points to the trailing edge of the return value
  1769       //   when the vmtarget is to be called; this is
  1770       //   equal to vmargslot + (retained ? |collect| : 0)
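             // (Worked example, as a sketch: a filter of argument #2 through a
             //  unary function F has vmargslot at arg #2's trailing edge,
             //  argument = F, vmtarget = the eventual target, and, since nothing
             //  is retained and |collect| == 1, vminfo == vmargslot.)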
  1772       // Pass 0 or more argument slots to the recursive target.
  1773       int collect_count_constant = ek_adapter_opt_collect_count(ek);
  1775       // The collected arguments are copied from the saved argument list:
  1776       int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
  1778       assert(ek_orig == _adapter_collect_args ||
  1779              ek_orig == _adapter_fold_args, "");
  1780       bool retain_original_args = (ek_orig == _adapter_fold_args);
  1782       // The return value is replaced (or inserted) at the 'vminfo' argslot.
  1783       // Sometimes we can compute this statically.
  1784       int dest_slot_constant = -1;
  1785       if (!retain_original_args)
  1786         dest_slot_constant = collect_slot_constant;
  1787       else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
  1788         // We are preserving all the arguments, and the return value is prepended,
  1789         // so the return slot is to the left (above) the |collect| sequence.
  1790         dest_slot_constant = collect_slot_constant + collect_count_constant;
  1792       // Replace all those slots by the result of the recursive call.
  1793       // The result type can be one of ref, int, long, float, double, void.
  1794       // In the case of void, nothing is pushed on the stack after return.
  1795       BasicType dest = ek_adapter_opt_collect_type(ek);
  1796       assert(dest == type2wfield[dest], "dest is a stack slot type");
  1797       int dest_count = type2size[dest];
  1798       assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
  1800       // Choose a return continuation.
  1801       EntryKind ek_ret = _adapter_opt_return_any;
  1802       if (dest != T_CONFLICT && OptimizeMethodHandles) {
  1803         switch (dest) {
  1804         case T_INT    : ek_ret = _adapter_opt_return_int;     break;
  1805         case T_LONG   : ek_ret = _adapter_opt_return_long;    break;
  1806         case T_FLOAT  : ek_ret = _adapter_opt_return_float;   break;
  1807         case T_DOUBLE : ek_ret = _adapter_opt_return_double;  break;
  1808         case T_OBJECT : ek_ret = _adapter_opt_return_ref;     break;
  1809         case T_VOID   : ek_ret = _adapter_opt_return_void;    break;
  1810         default       : ShouldNotReachHere();
  1812         if (dest == T_OBJECT && dest_slot_constant >= 0) {
  1813           EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
  1814           if (ek_try <= _adapter_opt_return_LAST &&
  1815               ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
  1816             ek_ret = ek_try;
  1819         assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
  1822       // Already pushed:  ... keep1 | collect | keep2 | sender_pc |
  1823       // push(sender_pc);
  1825       // Compute argument base:
  1826       Register rax_argv = rax_argslot;
  1827       __ lea(rax_argv, __ argument_address(constant(0)));
  1829       // Push a few extra argument words, if we need them to store the return value.
  1831         int extra_slots = 0;
  1832         if (retain_original_args) {
  1833           extra_slots = dest_count;
  1834         } else if (collect_count_constant == -1) {
  1835           extra_slots = dest_count;  // collect_count might be zero; be generous
  1836         } else if (dest_count > collect_count_constant) {
  1837           extra_slots = (dest_count - collect_count_constant);
  1838         } else {
  1839           // else we know we have enough dead space in |collect| to repurpose for return values
  1841         DEBUG_ONLY(extra_slots += 1);
  1842         if (extra_slots > 0) {
  1843           __ pop(rbx_temp);   // return value
  1844           __ subptr(rsp, (extra_slots * Interpreter::stackElementSize));
  1845           // Push guard word #2 in debug mode.
  1846           DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2));
  1847           __ push(rbx_temp);
  1851       RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv,
  1852                                           entry(ek_ret)->from_interpreted_entry(), rbx_temp);
  1854       // Now pushed:  ... keep1 | collect | keep2 | RF |
  1855       // some handy frame slots:
  1856       Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes());
  1857       Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
  1858       Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
  1860 #ifdef ASSERT
  1861       if (VerifyMethodHandles && dest != T_CONFLICT) {
  1862         BLOCK_COMMENT("verify AMH.conv.dest");
  1863         load_conversion_dest_type(_masm, rbx_temp, conversion_addr);
  1864         Label L_dest_ok;
  1865         __ cmpl(rbx_temp, (int) dest);
  1866         __ jcc(Assembler::equal, L_dest_ok);
  1867         if (dest == T_INT) {
  1868           for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  1869             if (is_subword_type(BasicType(bt))) {
  1870               __ cmpl(rbx_temp, (int) bt);
  1871               __ jcc(Assembler::equal, L_dest_ok);
  1875         __ stop("bad dest in AMH.conv");
  1876         __ BIND(L_dest_ok);
  1878 #endif //ASSERT
  1880       // Find out where the original copy of the recursive argument sequence begins.
  1881       Register rax_coll = rax_argv;
  1883         RegisterOrConstant collect_slot = collect_slot_constant;
  1884         if (collect_slot_constant == -1) {
  1885           __ movl(rdi_temp, rcx_amh_vmargslot);
  1886           collect_slot = rdi_temp;
  1888         if (collect_slot_constant != 0)
  1889           __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale()));
  1890         // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2|
  1893       // Replace the old AMH with the recursive MH.  (No going back now.)
  1894       // In the case of a boxing call, the recursive call is to a 'boxer' method,
  1895       // such as Integer.valueOf or Long.valueOf.  In the case of a filter
  1896       // or collect call, it will take one or more arguments, transform them,
  1897       // and return some result, to store back into argument_base[vminfo].
  1898       __ load_heap_oop(rcx_recv, rcx_amh_argument);
  1899       if (VerifyMethodHandles)  verify_method_handle(_masm, rcx_recv);
  1901       // Push a space for the recursively called MH first:
  1902       __ push((int32_t)NULL_WORD);
  1904       // Calculate |collect|, the number of arguments we are collecting.
  1905       Register rdi_collect_count = rdi_temp;
  1906       RegisterOrConstant collect_count;
  1907       if (collect_count_constant >= 0) {
  1908         collect_count = collect_count_constant;
  1909       } else {
  1910         __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp);
  1911         collect_count = rdi_collect_count;
  1913 #ifdef ASSERT
  1914       if (VerifyMethodHandles && collect_count_constant >= 0) {
  1915         __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
  1916         Label L_count_ok;
  1917         __ cmpl(rbx_temp, collect_count_constant);
  1918         __ jcc(Assembler::equal, L_count_ok);
  1919         __ stop("bad vminfo in AMH.conv");
  1920         __ BIND(L_count_ok);
  1922 #endif //ASSERT
  1924       // copy |collect| slots directly to TOS:
  1925       push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
  1926       // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
  1927       // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
  1929       // If necessary, adjust the saved arguments to make room for the eventual return value.
  1930       // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
  1931       // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
  1932       // In the non-retaining case, this might move keep2 either up or down.
  1933       // We don't have to copy the whole | RF... collect | complex,
  1934       // but we must adjust RF.saved_args_base.
  1935       // Also, from now on, we will forget about the original copy of |collect|.
  1936       // If we are retaining it, we will treat it as part of |keep2|.
  1937       // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
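             // (Numeric sketch: collecting three int args into one int result gives
             //  open_count == 1 and close_count == 3, so |keep3| slides up by two
             //  slots; retaining the originals (fold) gives close_count == 0 instead,
             //  and |keep3| slides down by one slot to make room for the result.)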
  1939       BLOCK_COMMENT("adjust trailing arguments {");
  1940       // Compare the sizes of |+dest+| and |-collect-|, which are opposing opening and closing movements.
  1941       int                open_count  = dest_count;
  1942       RegisterOrConstant close_count = collect_count_constant;
  1943       Register rdi_close_count = rdi_collect_count;
  1944       if (retain_original_args) {
  1945         close_count = constant(0);
  1946       } else if (collect_count_constant == -1) {
  1947         close_count = rdi_collect_count;
  1950       // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
  1951       RegisterOrConstant keep3_count;
  1952       Register rsi_keep3_count = rsi;  // can repair from RF.exact_sender_sp
  1953       if (dest_slot_constant >= 0) {
  1954         keep3_count = dest_slot_constant;
  1955       } else  {
  1956         load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr);
  1957         keep3_count = rsi_keep3_count;
  1959 #ifdef ASSERT
  1960       if (VerifyMethodHandles && dest_slot_constant >= 0) {
  1961         load_conversion_vminfo(_masm, rbx_temp, conversion_addr);
  1962         Label L_vminfo_ok;
  1963         __ cmpl(rbx_temp, dest_slot_constant);
  1964         __ jcc(Assembler::equal, L_vminfo_ok);
  1965         __ stop("bad vminfo in AMH.conv");
  1966         __ BIND(L_vminfo_ok);
  1968 #endif //ASSERT
  1970       // tasks remaining:
  1971       bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
  1972       bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
  1973       bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
  1975       if (stomp_dest | fix_arg_base) {
  1976         // we will probably need an updated rax_argv value
  1977         if (collect_slot_constant >= 0) {
  1978           // rax_coll already holds the leading edge of |keep2|, so tweak it
  1979           assert(rax_coll == rax_argv, "elided a move");
  1980           if (collect_slot_constant != 0)
  1981             __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
  1982         } else {
  1983           // Just reload from RF.saved_args_base.
  1984           __ movptr(rax_argv, saved_args_base_addr);
  1988       // Old and new argument locations (based at slot 0).
  1989       // Net shift (&new_argv - &old_argv) is (close_count - open_count).
  1990       bool zero_open_count = (open_count == 0);  // remember this bit of info
  1991       if (move_keep3 && fix_arg_base) {
  1992         // It will be easier to have everything in one register:
  1993         if (close_count.is_register()) {
  1994           // Deduct open_count from close_count register to get a clean +/- value.
  1995           __ subptr(close_count.as_register(), open_count);
  1996         } else {
  1997           close_count = close_count.as_constant() - open_count;
  1999         open_count = 0;
  2001       Address old_argv(rax_argv, 0);
  2002       Address new_argv(rax_argv, close_count,  Interpreter::stackElementScale(),
  2003                                 - open_count * Interpreter::stackElementSize);
  2005       // First decide if any actual data are to be moved.
  2006       // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
  2007       // (As it happens, all movements involve an argument list size change.)
  2009       // If there are variable parameters, use dynamic checks to skip around the whole mess.
  2010       Label L_done;
  2011       if (!keep3_count.is_constant()) {
  2012         __ testl(keep3_count.as_register(), keep3_count.as_register());
  2013         __ jcc(Assembler::zero, L_done);
  2015       if (!close_count.is_constant()) {
  2016         __ cmpl(close_count.as_register(), open_count);
  2017         __ jcc(Assembler::equal, L_done);
  2020       if (move_keep3 && fix_arg_base) {
  2021         bool emit_move_down = false, emit_move_up = false, emit_guard = false;
  2022         if (!close_count.is_constant()) {
  2023           emit_move_down = emit_guard = !zero_open_count;
  2024           emit_move_up   = true;
  2025         } else if (open_count != close_count.as_constant()) {
  2026           emit_move_down = (open_count > close_count.as_constant());
  2027           emit_move_up   = !emit_move_down;
  2029         Label L_move_up;
  2030         if (emit_guard) {
  2031           __ cmpl(close_count.as_register(), open_count);
  2032           __ jcc(Assembler::greater, L_move_up);
  2035         if (emit_move_down) {
  2036           // Move arguments down if |+dest+| > |-collect-|
  2037           // (This is rare, except when arguments are retained.)
  2038           // This opens space for the return value.
  2039           if (keep3_count.is_constant()) {
  2040             for (int i = 0; i < keep3_count.as_constant(); i++) {
  2041               __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
  2042               __ movptr(          new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
  2044           } else {
  2045             Register rbx_argv_top = rbx_temp;
  2046             __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()));
  2047             move_arg_slots_down(_masm,
  2048                                 old_argv,     // beginning of old argv
  2049                                 rbx_argv_top, // end of old argv
  2050                                 close_count,  // distance to move down (must be negative)
  2051                                 rax_argv, rdx_temp);
  2052             // Used argv as an iteration variable; reload from RF.saved_args_base.
  2053             __ movptr(rax_argv, saved_args_base_addr);
  2057         if (emit_guard) {
  2058           __ jmp(L_done);  // assumes emit_move_up is true also
  2059           __ BIND(L_move_up);
  2062         if (emit_move_up) {
  2064           // Move arguments up if |+dest+| < |-collect-|
  2065           // (This is the usual case, except when |keep3| is empty.)
  2066           // This closes up the space occupied by the now-deleted collect values.
  2067           if (keep3_count.is_constant()) {
  2068             for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
  2069               __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
  2070               __ movptr(          new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
  2072           } else {
  2073             Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale());
  2074             move_arg_slots_up(_masm,
  2075                               rax_argv,     // beginning of old argv
  2076                               argv_top,     // end of old argv
  2077                               close_count,  // distance to move up (must be positive)
  2078                               rbx_temp, rdx_temp);
  2082       __ BIND(L_done);
  2084       if (fix_arg_base) {
  2085         // adjust RF.saved_args_base by adding (close_count - open_count)
  2086         if (!new_argv.is_same_address(Address(rax_argv, 0)))
  2087           __ lea(rax_argv, new_argv);
  2088         __ movptr(saved_args_base_addr, rax_argv);
  2091       if (stomp_dest) {
  2092         // Stomp the return slot, so it doesn't hold garbage.
  2093         // This isn't strictly necessary, but it may help detect bugs.
  2094         int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER;
  2095         __ movptr(Address(rax_argv, keep3_count, Address::times_ptr),
  2096                   (int32_t) forty_two);
  2097         // uses rsi_keep3_count
  2099       BLOCK_COMMENT("} adjust trailing arguments");
  2101       BLOCK_COMMENT("do_recursive_call");
  2102       __ mov(saved_last_sp, rsp);    // set rsi/r13 for callee
  2103       __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr());
  2104       // The globally unique bounce address has two purposes:
  2105       // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
  2106       // 2. When returned to, it cuts back the stack and redirects control flow
  2107       //    to the return handler.
  2108       // The return handler will further cut back the stack when it takes
  2109       // down the RF.  Perhaps there is a way to streamline this further.
  2111       // State during recursive call:
  2112       // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
  2113       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  2115       break;
  2118   case _adapter_opt_return_ref:
  2119   case _adapter_opt_return_int:
  2120   case _adapter_opt_return_long:
  2121   case _adapter_opt_return_float:
  2122   case _adapter_opt_return_double:
  2123   case _adapter_opt_return_void:
  2124   case _adapter_opt_return_S0_ref:
  2125   case _adapter_opt_return_S1_ref:
  2126   case _adapter_opt_return_S2_ref:
  2127   case _adapter_opt_return_S3_ref:
  2128   case _adapter_opt_return_S4_ref:
  2129   case _adapter_opt_return_S5_ref:
  2131       BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
  2132       int       dest_slot_constant = ek_adapter_opt_return_slot(ek);
  2134       if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
  2136       if (dest_slot_constant == -1) {
  2137         // The current stub is a general handler for this dest_type.
  2138         // It can be called from _adapter_opt_return_any below.
  2139         // Stash the address in a little table.
  2140         assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
  2141         address return_handler = __ pc();
  2142         _adapter_return_handlers[dest_type_constant] = return_handler;
  2143         if (dest_type_constant == T_INT) {
  2144           // do the subword types too
  2145           for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
  2146             if (is_subword_type(BasicType(bt)) &&
  2147                 _adapter_return_handlers[bt] == NULL) {
  2148               _adapter_return_handlers[bt] = return_handler;
  2154       Register rbx_arg_base = rbx_temp;
  2155       assert_different_registers(rax, rdx,  // possibly live return value registers
  2156                                  rdi_temp, rbx_arg_base);
  2158       Address conversion_addr      = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
  2159       Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
  2161       __ movptr(rbx_arg_base, saved_args_base_addr);
  2162       RegisterOrConstant dest_slot = dest_slot_constant;
  2163       if (dest_slot_constant == -1) {
  2164         load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
  2165         dest_slot = rdi_temp;
  2167       // Store the result back into the argslot.
  2168       // This code uses the interpreter calling sequence, in which the return value
  2169       // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
  2170       // There are certain irregularities with floating point values, which can be seen
  2171       // in TemplateInterpreterGenerator::generate_return_entry_for.
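             // (Roughly: integral and reference results arrive in rax -- rdx:rax for
             //  64-bit values on 32-bit -- and floating-point results in xmm0 or ST(0);
             //  move_return_value picks the matching source for dest_type_constant.)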
  2172       move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale()));
  2174       RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp);
  2175       __ push(rdx_temp);  // repush the return PC
  2177       // Load the final target and go.
  2178       if (VerifyMethodHandles)  verify_method_handle(_masm, rcx_recv);
  2179       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  2180       __ hlt(); // --------------------
  2181       break;
  2184   case _adapter_opt_return_any:
  2186       if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
  2187       Register rdi_conv = rdi_temp;
  2188       assert_different_registers(rax, rdx,  // possibly live return value registers
  2189                                  rdi_conv, rbx_temp);
  2191       Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
  2192       load_conversion_dest_type(_masm, rdi_conv, conversion_addr);
  2193       __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0]));
  2194       __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr));
  2196 #ifdef ASSERT
  2197       { Label L_badconv;
  2198         __ testptr(rbx_temp, rbx_temp);
  2199         __ jccb(Assembler::zero, L_badconv);
  2200         __ jmp(rbx_temp);
  2201         __ bind(L_badconv);
  2202         __ stop("bad method handle return");
  2204 #else //ASSERT
  2205       __ jmp(rbx_temp);
  2206 #endif //ASSERT
  2207       break;
  2210   case _adapter_opt_spread_0:
  2211   case _adapter_opt_spread_1_ref:
  2212   case _adapter_opt_spread_2_ref:
  2213   case _adapter_opt_spread_3_ref:
  2214   case _adapter_opt_spread_4_ref:
  2215   case _adapter_opt_spread_5_ref:
  2216   case _adapter_opt_spread_ref:
  2217   case _adapter_opt_spread_byte:
  2218   case _adapter_opt_spread_char:
  2219   case _adapter_opt_spread_short:
  2220   case _adapter_opt_spread_int:
  2221   case _adapter_opt_spread_long:
  2222   case _adapter_opt_spread_float:
  2223   case _adapter_opt_spread_double:
  2225       // spread an array out into a group of arguments
  2226       int length_constant = ek_adapter_opt_spread_count(ek);
  2227       bool length_can_be_zero = (length_constant == 0);
  2228       if (length_constant < 0) {
  2229         // some adapters with variable length must handle the zero case
  2230         if (!OptimizeMethodHandles ||
  2231             ek_adapter_opt_spread_type(ek) != T_OBJECT)
  2232           length_can_be_zero = true;
  2235       // find the address of the array argument
  2236       __ movl(rax_argslot, rcx_amh_vmargslot);
  2237       __ lea(rax_argslot, __ argument_address(rax_argslot));
  2239       // grab another temp
  2240       Register rsi_temp = rsi;
  2241       { if (rsi_temp == saved_last_sp)  __ push(saved_last_sp); }
  2242       // (preceding push must be done after argslot address is taken!)
  2243 #define UNPUSH_RSI \
  2244       { if (rsi_temp == saved_last_sp)  __ pop(saved_last_sp); }
  2246       // rax_argslot points both to the array and to the first output arg
  2247       vmarg = Address(rax_argslot, 0);
  2249       // Get the array value.
  2250       Register  rsi_array       = rsi_temp;
  2251       Register  rdx_array_klass = rdx_temp;
  2252       BasicType elem_type = ek_adapter_opt_spread_type(ek);
  2253       int       elem_slots = type2size[elem_type];  // 1 or 2
  2254       int       array_slots = 1;  // array is always a T_OBJECT
  2255       int       length_offset   = arrayOopDesc::length_offset_in_bytes();
  2256       int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
  2257       __ movptr(rsi_array, vmarg);
  2259       Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
  2260       if (length_can_be_zero) {
  2261         // handle the null pointer case, if zero is allowed
  2262         Label L_skip;
  2263         if (length_constant < 0) {
  2264           load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion);
  2265           __ testl(rbx_temp, rbx_temp);
  2266           __ jcc(Assembler::notZero, L_skip);
  2268         __ testptr(rsi_array, rsi_array);
  2269         __ jcc(Assembler::zero, L_array_is_empty);
  2270         __ bind(L_skip);
  2272       __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
  2273       __ load_klass(rdx_array_klass, rsi_array);
  2275       // Check the array type.
  2276       Register rbx_klass = rbx_temp;
  2277       __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
  2278       load_klass_from_Class(_masm, rbx_klass);
  2280       Label ok_array_klass, bad_array_klass, bad_array_length;
  2281       __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
  2282       // If we get here, the type check failed!
  2283       __ jmp(bad_array_klass);
  2284       __ BIND(ok_array_klass);
  2286       // Check length.
  2287       if (length_constant >= 0) {
  2288         __ cmpl(Address(rsi_array, length_offset), length_constant);
  2289       } else {
  2290         Register rbx_vminfo = rbx_temp;
  2291         load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
  2292         __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
  2294       __ jcc(Assembler::notEqual, bad_array_length);
  2296       Register rdx_argslot_limit = rdx_temp;
  2298       // Array length checks out.  Now insert any required stack slots.
  2299       if (length_constant == -1) {
  2300         // Form a pointer to the end of the affected region.
  2301         __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
  2302         // 'stack_move' is the (negative) number of words to insert
  2303         // This number already accounts for elem_slots.
  2304         Register rdi_stack_move = rdi_temp;
  2305         load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
  2306         __ cmpptr(rdi_stack_move, 0);
  2307         assert(stack_move_unit() < 0, "else change this comparison");
  2308         __ jcc(Assembler::less, L_insert_arg_space);
  2309         __ jcc(Assembler::equal, L_copy_args);
  2310         // single argument case, with no array movement
  2311         __ BIND(L_array_is_empty);
  2312         remove_arg_slots(_masm, -stack_move_unit() * array_slots,
  2313                          rax_argslot, rbx_temp, rdx_temp);
  2314         __ jmp(L_args_done);  // no spreading to do
  2315         __ BIND(L_insert_arg_space);
  2316         // come here in the usual case, stack_move < 0 (2 or more spread arguments)
  2317         Register rsi_temp = rsi_array;  // spill this
  2318         insert_arg_slots(_masm, rdi_stack_move,
  2319                          rax_argslot, rbx_temp, rsi_temp);
  2320         // reload the array since rsi was killed
  2321         // reload from rdx_argslot_limit since rax_argslot is now decremented
  2322         __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
  2323       } else if (length_constant >= 1) {
  2324         int new_slots = (length_constant * elem_slots) - array_slots;
  2325         insert_arg_slots(_masm, new_slots * stack_move_unit(),
  2326                          rax_argslot, rbx_temp, rdx_temp);
  2327       } else if (length_constant == 0) {
  2328         __ BIND(L_array_is_empty);
  2329         remove_arg_slots(_masm, -stack_move_unit() * array_slots,
  2330                          rax_argslot, rbx_temp, rdx_temp);
  2331       } else {
  2332         ShouldNotReachHere();
  2335       // Copy from the array to the new slots.
  2336       // Note: Stack change code preserves integrity of rax_argslot pointer.
  2337       // So even after slot insertions, rax_argslot still points to first argument.
  2338       // Beware:  Arguments that are shallow on the stack are deep in the array,
  2339       // and vice versa.  So a downward-growing stack (the usual) has to be copied
  2340       // elementwise in reverse order from the source array.
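             // pseudo-code for the variable-length copy below:
             //   fill = rdx_argslot_limit;
             //   for (i = 0; i < length; i++) { fill -= elem_slots; *fill = array[i]; }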
  2341       __ BIND(L_copy_args);
  2342       if (length_constant == -1) {
  2343         // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
  2344         // Array element [0] goes at rdx_argslot_limit[-wordSize].
  2345         Register rsi_source = rsi_array;
  2346         __ lea(rsi_source, Address(rsi_array, elem0_offset));
  2347         Register rdx_fill_ptr = rdx_argslot_limit;
  2348         Label loop;
  2349         __ BIND(loop);
  2350         __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
  2351         move_typed_arg(_masm, elem_type, true,
  2352                        Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
  2353                        rbx_temp, rdi_temp);
  2354         __ addptr(rsi_source, type2aelembytes(elem_type));
  2355         __ cmpptr(rdx_fill_ptr, rax_argslot);
  2356         __ jcc(Assembler::greater, loop);
  2357       } else if (length_constant == 0) {
  2358         // nothing to copy
  2359       } else {
  2360         int elem_offset = elem0_offset;
  2361         int slot_offset = length_constant * Interpreter::stackElementSize;
  2362         for (int index = 0; index < length_constant; index++) {
  2363           slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
  2364           move_typed_arg(_masm, elem_type, true,
  2365                          Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
  2366                          rbx_temp, rdi_temp);
  2367           elem_offset += type2aelembytes(elem_type);
  2370       __ BIND(L_args_done);
  2372       // Arguments are spread.  Move to next method handle.
  2373       UNPUSH_RSI;
  2374       __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
  2375       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
  2377       __ bind(bad_array_klass);
  2378       UNPUSH_RSI;
  2379       assert(!vmarg.uses(rarg2_required), "must be different registers");
  2380       __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
  2381       __ movptr(        rarg1_actual,   vmarg);                                         // bad array
  2382       __ movl(          rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
  2383       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
  2385       __ bind(bad_array_length);
  2386       UNPUSH_RSI;
  2387       assert(!vmarg.uses(rarg2_required), "must be different registers");
  2388       __ mov(    rarg2_required, rcx_recv);                       // AMH requiring a certain length
  2389       __ movptr( rarg1_actual,   vmarg);                          // bad array
  2390       __ movl(   rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
  2391       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
  2392 #undef UNPUSH_RSI
  2394       break;
  2397   default:
  2398     // do not require all platforms to recognize all adapter types
  2399     __ nop();
  2400     return;
  2402   __ hlt();
  2404   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  2405   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
  2407   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
