src/cpu/x86/vm/sharedRuntime_x86_64.cpp

author:      twisti
date:        Thu, 03 Nov 2011 04:12:49 -0700
changeset:   3252 (448691f285a5)
parent:      3130 (5432047c7db7)
child:       3253 (1feb272af3a7)
permissions: -rw-r--r--

7106944: assert(_pc == *pc_addr) failed may be too strong
Reviewed-by: kvn, never

     1 /*
     2  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/assembler.hpp"
    27 #include "assembler_x86.inline.hpp"
    28 #include "code/debugInfoRec.hpp"
    29 #include "code/icBuffer.hpp"
    30 #include "code/vtableStubs.hpp"
    31 #include "interpreter/interpreter.hpp"
    32 #include "oops/compiledICHolderOop.hpp"
    33 #include "prims/jvmtiRedefineClassesTrace.hpp"
    34 #include "runtime/sharedRuntime.hpp"
    35 #include "runtime/vframeArray.hpp"
    36 #include "vmreg_x86.inline.hpp"
    37 #ifdef COMPILER1
    38 #include "c1/c1_Runtime1.hpp"
    39 #endif
    40 #ifdef COMPILER2
    41 #include "opto/runtime.hpp"
    42 #endif
    44 #define __ masm->
    46 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
    48 class SimpleRuntimeFrame {
    50   public:
    52   // Most of the runtime stubs have this simple frame layout.
    53   // This class exists to make the layout shared in one place.
    54   // Offsets are for compiler stack slots, which are jints.
    55   enum layout {
    56     // The frame sender code expects that rbp will be in the "natural" place and
    57     // will override any oopMap setting for it. We must therefore force the layout
    58     // so that it agrees with the frame sender code.
    59     rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    60     rbp_off2,
    61     return_off, return_off2,
    62     framesize
    63   };
    64 };
    66 class RegisterSaver {
    67   // Capture info about frame layout.  Layout offsets are in jint
    68   // units because compiler frame slots are jints.
    69 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
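// Editorial note (not in the original source): DEF_XMM_OFFS(0) expands to
//   xmm0_off = xmm_off + (0)*16/BytesPerInt, xmm0H_off
// so each XMM register is given two named jint slots (the low 8 bytes the
// OopMap cares about) out of the 16-byte area fxsave reserves for it; the
// remaining two slots of that area are simply skipped when DEF_XMM_OFFS(1)
// restarts the numbering at xmm_off + 4.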
    70   enum layout {
    71     fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    72     xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    73     DEF_XMM_OFFS(0),
    74     DEF_XMM_OFFS(1),
    75     DEF_XMM_OFFS(2),
    76     DEF_XMM_OFFS(3),
    77     DEF_XMM_OFFS(4),
    78     DEF_XMM_OFFS(5),
    79     DEF_XMM_OFFS(6),
    80     DEF_XMM_OFFS(7),
    81     DEF_XMM_OFFS(8),
    82     DEF_XMM_OFFS(9),
    83     DEF_XMM_OFFS(10),
    84     DEF_XMM_OFFS(11),
    85     DEF_XMM_OFFS(12),
    86     DEF_XMM_OFFS(13),
    87     DEF_XMM_OFFS(14),
    88     DEF_XMM_OFFS(15),
    89     fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    90     fpu_stateH_end,
    91     r15_off, r15H_off,
    92     r14_off, r14H_off,
    93     r13_off, r13H_off,
    94     r12_off, r12H_off,
    95     r11_off, r11H_off,
    96     r10_off, r10H_off,
    97     r9_off,  r9H_off,
    98     r8_off,  r8H_off,
    99     rdi_off, rdiH_off,
   100     rsi_off, rsiH_off,
   101     ignore_off, ignoreH_off,  // extra copy of rbp
   102     rsp_off, rspH_off,
   103     rbx_off, rbxH_off,
   104     rdx_off, rdxH_off,
   105     rcx_off, rcxH_off,
   106     rax_off, raxH_off,
   107     // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
   108     align_off, alignH_off,
   109     flags_off, flagsH_off,
   110     // The frame sender code expects that rbp will be in the "natural" place and
   111     // will override any oopMap setting for it. We must therefore force the layout
   112     // so that it agrees with the frame sender code.
   113     rbp_off, rbpH_off,        // copy of rbp we will restore
   114     return_off, returnH_off,  // slot for return address
   115     reg_save_size             // size in compiler stack slots
   116   };
   118  public:
   119   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
   120   static void restore_live_registers(MacroAssembler* masm);
   122   // Offsets into the register save area
   123   // Used by deoptimization when it is managing result register
   124   // values on its own
   126   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
   127   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
   128   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
   129   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
   130   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
   132   // During deoptimization only the result registers need to be restored,
   133   // all the other values have already been extracted.
   134   static void restore_result_registers(MacroAssembler* masm);
   135 };
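// Editorial sketch (assumed usage, not part of this changeset): a typical
// runtime stub pairs these calls around a VM call so that deoptimization and
// GC can find every register value, roughly:
//
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
//   // ... set up arguments and call into the VM ...
//   RegisterSaver::restore_live_registers(masm);
//
// with 'map' recorded in the blob's OopMapSet at the call's return pc.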
   137 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
   139   // Always make the frame size 16-byte aligned
   140   int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
   141                                      reg_save_size*BytesPerInt, 16);
   142   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
   143   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
   144   // The caller will allocate additional_frame_words
   145   int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
   146   // CodeBlob frame size is in words.
   147   int frame_size_in_words = frame_size_in_bytes / wordSize;
   148   *total_frame_words = frame_size_in_words;
   150   // Save registers, fpu state, and flags.
   151   // We assume caller has already pushed the return address onto the
   152   // stack, so rsp is 8-byte aligned here.
    153   // We push rbp twice in this sequence because we want the real rbp
    154   // to be under the return address like a normal enter.
   156   __ enter();          // rsp becomes 16-byte aligned here
   157   __ push_CPU_state(); // Push a multiple of 16 bytes
   158   if (frame::arg_reg_save_area_bytes != 0) {
   159     // Allocate argument register save area
   160     __ subptr(rsp, frame::arg_reg_save_area_bytes);
   161   }
   163   // Set an oopmap for the call site.  This oopmap will map all
   164   // oop-registers and debug-info registers as callee-saved.  This
   165   // will allow deoptimization at this safepoint to find all possible
   166   // debug-info recordings, as well as let GC find all oops.
   168   OopMapSet *oop_maps = new OopMapSet();
   169   OopMap* map = new OopMap(frame_size_in_slots, 0);
   170   map->set_callee_saved(VMRegImpl::stack2reg( rax_off  + additional_frame_slots), rax->as_VMReg());
   171   map->set_callee_saved(VMRegImpl::stack2reg( rcx_off  + additional_frame_slots), rcx->as_VMReg());
   172   map->set_callee_saved(VMRegImpl::stack2reg( rdx_off  + additional_frame_slots), rdx->as_VMReg());
   173   map->set_callee_saved(VMRegImpl::stack2reg( rbx_off  + additional_frame_slots), rbx->as_VMReg());
   174   // rbp location is known implicitly by the frame sender code, needs no oopmap
    175   // and the location where rbp was saved is ignored
   176   map->set_callee_saved(VMRegImpl::stack2reg( rsi_off  + additional_frame_slots), rsi->as_VMReg());
   177   map->set_callee_saved(VMRegImpl::stack2reg( rdi_off  + additional_frame_slots), rdi->as_VMReg());
   178   map->set_callee_saved(VMRegImpl::stack2reg( r8_off   + additional_frame_slots), r8->as_VMReg());
   179   map->set_callee_saved(VMRegImpl::stack2reg( r9_off   + additional_frame_slots), r9->as_VMReg());
   180   map->set_callee_saved(VMRegImpl::stack2reg( r10_off  + additional_frame_slots), r10->as_VMReg());
   181   map->set_callee_saved(VMRegImpl::stack2reg( r11_off  + additional_frame_slots), r11->as_VMReg());
   182   map->set_callee_saved(VMRegImpl::stack2reg( r12_off  + additional_frame_slots), r12->as_VMReg());
   183   map->set_callee_saved(VMRegImpl::stack2reg( r13_off  + additional_frame_slots), r13->as_VMReg());
   184   map->set_callee_saved(VMRegImpl::stack2reg( r14_off  + additional_frame_slots), r14->as_VMReg());
   185   map->set_callee_saved(VMRegImpl::stack2reg( r15_off  + additional_frame_slots), r15->as_VMReg());
   186   map->set_callee_saved(VMRegImpl::stack2reg(xmm0_off  + additional_frame_slots), xmm0->as_VMReg());
   187   map->set_callee_saved(VMRegImpl::stack2reg(xmm1_off  + additional_frame_slots), xmm1->as_VMReg());
   188   map->set_callee_saved(VMRegImpl::stack2reg(xmm2_off  + additional_frame_slots), xmm2->as_VMReg());
   189   map->set_callee_saved(VMRegImpl::stack2reg(xmm3_off  + additional_frame_slots), xmm3->as_VMReg());
   190   map->set_callee_saved(VMRegImpl::stack2reg(xmm4_off  + additional_frame_slots), xmm4->as_VMReg());
   191   map->set_callee_saved(VMRegImpl::stack2reg(xmm5_off  + additional_frame_slots), xmm5->as_VMReg());
   192   map->set_callee_saved(VMRegImpl::stack2reg(xmm6_off  + additional_frame_slots), xmm6->as_VMReg());
   193   map->set_callee_saved(VMRegImpl::stack2reg(xmm7_off  + additional_frame_slots), xmm7->as_VMReg());
   194   map->set_callee_saved(VMRegImpl::stack2reg(xmm8_off  + additional_frame_slots), xmm8->as_VMReg());
   195   map->set_callee_saved(VMRegImpl::stack2reg(xmm9_off  + additional_frame_slots), xmm9->as_VMReg());
   196   map->set_callee_saved(VMRegImpl::stack2reg(xmm10_off + additional_frame_slots), xmm10->as_VMReg());
   197   map->set_callee_saved(VMRegImpl::stack2reg(xmm11_off + additional_frame_slots), xmm11->as_VMReg());
   198   map->set_callee_saved(VMRegImpl::stack2reg(xmm12_off + additional_frame_slots), xmm12->as_VMReg());
   199   map->set_callee_saved(VMRegImpl::stack2reg(xmm13_off + additional_frame_slots), xmm13->as_VMReg());
   200   map->set_callee_saved(VMRegImpl::stack2reg(xmm14_off + additional_frame_slots), xmm14->as_VMReg());
   201   map->set_callee_saved(VMRegImpl::stack2reg(xmm15_off + additional_frame_slots), xmm15->as_VMReg());
   203   // %%% These should all be a waste but we'll keep things as they were for now
   204   if (true) {
   205     map->set_callee_saved(VMRegImpl::stack2reg( raxH_off  + additional_frame_slots),
   206                           rax->as_VMReg()->next());
   207     map->set_callee_saved(VMRegImpl::stack2reg( rcxH_off  + additional_frame_slots),
   208                           rcx->as_VMReg()->next());
   209     map->set_callee_saved(VMRegImpl::stack2reg( rdxH_off  + additional_frame_slots),
   210                           rdx->as_VMReg()->next());
   211     map->set_callee_saved(VMRegImpl::stack2reg( rbxH_off  + additional_frame_slots),
   212                           rbx->as_VMReg()->next());
   213     // rbp location is known implicitly by the frame sender code, needs no oopmap
   214     map->set_callee_saved(VMRegImpl::stack2reg( rsiH_off  + additional_frame_slots),
   215                           rsi->as_VMReg()->next());
   216     map->set_callee_saved(VMRegImpl::stack2reg( rdiH_off  + additional_frame_slots),
   217                           rdi->as_VMReg()->next());
   218     map->set_callee_saved(VMRegImpl::stack2reg( r8H_off   + additional_frame_slots),
   219                           r8->as_VMReg()->next());
   220     map->set_callee_saved(VMRegImpl::stack2reg( r9H_off   + additional_frame_slots),
   221                           r9->as_VMReg()->next());
   222     map->set_callee_saved(VMRegImpl::stack2reg( r10H_off  + additional_frame_slots),
   223                           r10->as_VMReg()->next());
   224     map->set_callee_saved(VMRegImpl::stack2reg( r11H_off  + additional_frame_slots),
   225                           r11->as_VMReg()->next());
   226     map->set_callee_saved(VMRegImpl::stack2reg( r12H_off  + additional_frame_slots),
   227                           r12->as_VMReg()->next());
   228     map->set_callee_saved(VMRegImpl::stack2reg( r13H_off  + additional_frame_slots),
   229                           r13->as_VMReg()->next());
   230     map->set_callee_saved(VMRegImpl::stack2reg( r14H_off  + additional_frame_slots),
   231                           r14->as_VMReg()->next());
   232     map->set_callee_saved(VMRegImpl::stack2reg( r15H_off  + additional_frame_slots),
   233                           r15->as_VMReg()->next());
   234     map->set_callee_saved(VMRegImpl::stack2reg(xmm0H_off  + additional_frame_slots),
   235                           xmm0->as_VMReg()->next());
   236     map->set_callee_saved(VMRegImpl::stack2reg(xmm1H_off  + additional_frame_slots),
   237                           xmm1->as_VMReg()->next());
   238     map->set_callee_saved(VMRegImpl::stack2reg(xmm2H_off  + additional_frame_slots),
   239                           xmm2->as_VMReg()->next());
   240     map->set_callee_saved(VMRegImpl::stack2reg(xmm3H_off  + additional_frame_slots),
   241                           xmm3->as_VMReg()->next());
   242     map->set_callee_saved(VMRegImpl::stack2reg(xmm4H_off  + additional_frame_slots),
   243                           xmm4->as_VMReg()->next());
   244     map->set_callee_saved(VMRegImpl::stack2reg(xmm5H_off  + additional_frame_slots),
   245                           xmm5->as_VMReg()->next());
   246     map->set_callee_saved(VMRegImpl::stack2reg(xmm6H_off  + additional_frame_slots),
   247                           xmm6->as_VMReg()->next());
   248     map->set_callee_saved(VMRegImpl::stack2reg(xmm7H_off  + additional_frame_slots),
   249                           xmm7->as_VMReg()->next());
   250     map->set_callee_saved(VMRegImpl::stack2reg(xmm8H_off  + additional_frame_slots),
   251                           xmm8->as_VMReg()->next());
   252     map->set_callee_saved(VMRegImpl::stack2reg(xmm9H_off  + additional_frame_slots),
   253                           xmm9->as_VMReg()->next());
   254     map->set_callee_saved(VMRegImpl::stack2reg(xmm10H_off + additional_frame_slots),
   255                           xmm10->as_VMReg()->next());
   256     map->set_callee_saved(VMRegImpl::stack2reg(xmm11H_off + additional_frame_slots),
   257                           xmm11->as_VMReg()->next());
   258     map->set_callee_saved(VMRegImpl::stack2reg(xmm12H_off + additional_frame_slots),
   259                           xmm12->as_VMReg()->next());
   260     map->set_callee_saved(VMRegImpl::stack2reg(xmm13H_off + additional_frame_slots),
   261                           xmm13->as_VMReg()->next());
   262     map->set_callee_saved(VMRegImpl::stack2reg(xmm14H_off + additional_frame_slots),
   263                           xmm14->as_VMReg()->next());
   264     map->set_callee_saved(VMRegImpl::stack2reg(xmm15H_off + additional_frame_slots),
   265                           xmm15->as_VMReg()->next());
   266   }
   268   return map;
   269 }
   271 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
   272   if (frame::arg_reg_save_area_bytes != 0) {
   273     // Pop arg register save area
   274     __ addptr(rsp, frame::arg_reg_save_area_bytes);
   275   }
   276   // Recover CPU state
   277   __ pop_CPU_state();
   278   // Get the rbp described implicitly by the calling convention (no oopMap)
   279   __ pop(rbp);
   280 }
   282 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
   284   // Just restore result register. Only used by deoptimization. By
   285   // now any callee save register that needs to be restored to a c2
   286   // caller of the deoptee has been extracted into the vframeArray
   287   // and will be stuffed into the c2i adapter we create for later
   288   // restoration so only result registers need to be restored here.
   290   // Restore fp result register
   291   __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
   292   // Restore integer result register
   293   __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
   294   __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
    296   // Pop all of the register save area off the stack except the return address
   297   __ addptr(rsp, return_offset_in_bytes());
   298 }
   300 // The java_calling_convention describes stack locations as ideal slots on
   301 // a frame with no abi restrictions. Since we must observe abi restrictions
    302 // (here, the saved rbp and the return address) the slots must be biased by
   303 // the following value.
   304 static int reg2offset_in(VMReg r) {
   305   // Account for saved rbp and return address
   306   // This should really be in_preserve_stack_slots
   307   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
   308 }
   310 static int reg2offset_out(VMReg r) {
   311   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
   312 }
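// Editorial example (illustration only): an incoming stack argument in slot 0
// maps through reg2offset_in to (0 + 4) * 4 = 16, i.e. 16(rbp), which skips
// the saved rbp and the return address (two words, four jint slots) sitting
// between rbp and the caller's outgoing argument area.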
   314 // ---------------------------------------------------------------------------
   315 // Read the array of BasicTypes from a signature, and compute where the
   316 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte
   317 // quantities.  Values less than VMRegImpl::stack0 are registers, those above
   318 // refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
   319 // as framesizes are fixed.
    320 // VMRegImpl::stack0 refers to the first slot 0(sp),
    321 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
    322 // values up to RegisterImpl::number_of_registers are the 64-bit
    323 // integer registers.
   325 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
   326 // either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
   327 // units regardless of build. Of course for i486 there is no 64 bit build
   329 // The Java calling convention is a "shifted" version of the C ABI.
   330 // By skipping the first C ABI register we can call non-static jni methods
   331 // with small numbers of arguments without having to shuffle the arguments
   332 // at all. Since we control the java ABI we ought to at least get some
   333 // advantage out of it.
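// Editorial example (not part of the original comment): for a signature of
// (int, long, float, Object) the loop below assigns
//   int    -> j_rarg0                       (int_args 0 -> 1)
//   long   -> j_rarg1, its T_VOID half is set_bad()
//   float  -> j_farg0                       (fp_args  0 -> 1)
//   Object -> j_rarg2
// and returns 0 because no argument spilled past the register lists.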
   335 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
   336                                            VMRegPair *regs,
   337                                            int total_args_passed,
   338                                            int is_outgoing) {
   340   // Create the mapping between argument positions and
   341   // registers.
   342   static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
   343     j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
   344   };
   345   static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
   346     j_farg0, j_farg1, j_farg2, j_farg3,
   347     j_farg4, j_farg5, j_farg6, j_farg7
   348   };
   351   uint int_args = 0;
   352   uint fp_args = 0;
   353   uint stk_args = 0; // inc by 2 each time
   355   for (int i = 0; i < total_args_passed; i++) {
   356     switch (sig_bt[i]) {
   357     case T_BOOLEAN:
   358     case T_CHAR:
   359     case T_BYTE:
   360     case T_SHORT:
   361     case T_INT:
   362       if (int_args < Argument::n_int_register_parameters_j) {
   363         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
   364       } else {
   365         regs[i].set1(VMRegImpl::stack2reg(stk_args));
   366         stk_args += 2;
   367       }
   368       break;
   369     case T_VOID:
   370       // halves of T_LONG or T_DOUBLE
   371       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
   372       regs[i].set_bad();
   373       break;
   374     case T_LONG:
   375       assert(sig_bt[i + 1] == T_VOID, "expecting half");
   376       // fall through
   377     case T_OBJECT:
   378     case T_ARRAY:
   379     case T_ADDRESS:
   380       if (int_args < Argument::n_int_register_parameters_j) {
   381         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
   382       } else {
   383         regs[i].set2(VMRegImpl::stack2reg(stk_args));
   384         stk_args += 2;
   385       }
   386       break;
   387     case T_FLOAT:
   388       if (fp_args < Argument::n_float_register_parameters_j) {
   389         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
   390       } else {
   391         regs[i].set1(VMRegImpl::stack2reg(stk_args));
   392         stk_args += 2;
   393       }
   394       break;
   395     case T_DOUBLE:
   396       assert(sig_bt[i + 1] == T_VOID, "expecting half");
   397       if (fp_args < Argument::n_float_register_parameters_j) {
   398         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
   399       } else {
   400         regs[i].set2(VMRegImpl::stack2reg(stk_args));
   401         stk_args += 2;
   402       }
   403       break;
   404     default:
   405       ShouldNotReachHere();
   406       break;
   407     }
   408   }
   410   return round_to(stk_args, 2);
   411 }
    413 // Patch the caller's callsite with the entry to compiled code, if it exists.
   414 static void patch_callers_callsite(MacroAssembler *masm) {
   415   Label L;
   416   __ verify_oop(rbx);
   417   __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
   418   __ jcc(Assembler::equal, L);
   420   // Save the current stack pointer
   421   __ mov(r13, rsp);
   422   // Schedule the branch target address early.
   423   // Call into the VM to patch the caller, then jump to compiled callee
   424   // rax isn't live so capture return address while we easily can
   425   __ movptr(rax, Address(rsp, 0));
   427   // align stack so push_CPU_state doesn't fault
   428   __ andptr(rsp, -(StackAlignmentInBytes));
   429   __ push_CPU_state();
   432   __ verify_oop(rbx);
   433   // VM needs caller's callsite
   434   // VM needs target method
   435   // This needs to be a long call since we will relocate this adapter to
   436   // the codeBuffer and it may not reach
   438   // Allocate argument register save area
   439   if (frame::arg_reg_save_area_bytes != 0) {
   440     __ subptr(rsp, frame::arg_reg_save_area_bytes);
   441   }
   442   __ mov(c_rarg0, rbx);
   443   __ mov(c_rarg1, rax);
   444   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
   446   // De-allocate argument register save area
   447   if (frame::arg_reg_save_area_bytes != 0) {
   448     __ addptr(rsp, frame::arg_reg_save_area_bytes);
   449   }
   451   __ pop_CPU_state();
   452   // restore sp
   453   __ mov(rsp, r13);
   454   __ bind(L);
   455 }
   458 static void gen_c2i_adapter(MacroAssembler *masm,
   459                             int total_args_passed,
   460                             int comp_args_on_stack,
   461                             const BasicType *sig_bt,
   462                             const VMRegPair *regs,
   463                             Label& skip_fixup) {
   464   // Before we get into the guts of the C2I adapter, see if we should be here
   465   // at all.  We've come from compiled code and are attempting to jump to the
   466   // interpreter, which means the caller made a static call to get here
   467   // (vcalls always get a compiled target if there is one).  Check for a
   468   // compiled target.  If there is one, we need to patch the caller's call.
   469   patch_callers_callsite(masm);
   471   __ bind(skip_fixup);
    473   // Since all args are passed on the stack, total_args_passed *
    474   // Interpreter::stackElementSize is the space we need. Plus one word for
    475   // the return address location, since we store it first rather than
    476   // holding it in rax across all the shuffling.
   478   int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
   480   // stack is aligned, keep it that way
   481   extraspace = round_to(extraspace, 2*wordSize);
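// Editorial arithmetic (assuming Interpreter::stackElementSize == wordSize == 8):
// with 3 interpreter args this is 3*8 + 8 = 32 bytes, already 16-byte aligned;
// with 2 args it is 24 bytes and rounds up to 32.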
   483   // Get return address
   484   __ pop(rax);
   486   // set senderSP value
   487   __ mov(r13, rsp);
   489   __ subptr(rsp, extraspace);
   491   // Store the return address in the expected location
   492   __ movptr(Address(rsp, 0), rax);
   494   // Now write the args into the outgoing interpreter space
   495   for (int i = 0; i < total_args_passed; i++) {
   496     if (sig_bt[i] == T_VOID) {
   497       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
   498       continue;
   499     }
   501     // offset to start parameters
   502     int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
   503     int next_off = st_off - Interpreter::stackElementSize;
   505     // Say 4 args:
   506     // i   st_off
   507     // 0   32 T_LONG
   508     // 1   24 T_VOID
   509     // 2   16 T_OBJECT
   510     // 3    8 T_BOOL
   511     // -    0 return address
   512     //
    513     // However, to make things extra confusing: because we can fit a long/double in
    514     // a single slot on a 64 bit VM and it would be silly to break them up, the interpreter
    515     // leaves one slot empty and only stores to a single slot. In this case the
    516     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
   518     VMReg r_1 = regs[i].first();
   519     VMReg r_2 = regs[i].second();
   520     if (!r_1->is_valid()) {
   521       assert(!r_2->is_valid(), "");
   522       continue;
   523     }
   524     if (r_1->is_stack()) {
   525       // memory to memory use rax
   526       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
   527       if (!r_2->is_valid()) {
   528         // sign extend??
   529         __ movl(rax, Address(rsp, ld_off));
   530         __ movptr(Address(rsp, st_off), rax);
   532       } else {
   534         __ movq(rax, Address(rsp, ld_off));
    536         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
   537         // T_DOUBLE and T_LONG use two slots in the interpreter
   538         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
   539           // ld_off == LSW, ld_off+wordSize == MSW
   540           // st_off == MSW, next_off == LSW
   541           __ movq(Address(rsp, next_off), rax);
   542 #ifdef ASSERT
   543           // Overwrite the unused slot with known junk
   544           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
   545           __ movptr(Address(rsp, st_off), rax);
   546 #endif /* ASSERT */
   547         } else {
   548           __ movq(Address(rsp, st_off), rax);
   549         }
   550       }
   551     } else if (r_1->is_Register()) {
   552       Register r = r_1->as_Register();
   553       if (!r_2->is_valid()) {
   554         // must be only an int (or less ) so move only 32bits to slot
   555         // why not sign extend??
   556         __ movl(Address(rsp, st_off), r);
   557       } else {
    558         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
   559         // T_DOUBLE and T_LONG use two slots in the interpreter
   560         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
   561           // long/double in gpr
   562 #ifdef ASSERT
   563           // Overwrite the unused slot with known junk
   564           __ mov64(rax, CONST64(0xdeadffffdeadaaab));
   565           __ movptr(Address(rsp, st_off), rax);
   566 #endif /* ASSERT */
   567           __ movq(Address(rsp, next_off), r);
   568         } else {
   569           __ movptr(Address(rsp, st_off), r);
   570         }
   571       }
   572     } else {
   573       assert(r_1->is_XMMRegister(), "");
   574       if (!r_2->is_valid()) {
    575         // only a float; use just part of the slot
   576         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
   577       } else {
   578 #ifdef ASSERT
   579         // Overwrite the unused slot with known junk
   580         __ mov64(rax, CONST64(0xdeadffffdeadaaac));
   581         __ movptr(Address(rsp, st_off), rax);
   582 #endif /* ASSERT */
   583         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
   584       }
   585     }
   586   }
   588   // Schedule the branch target address early.
   589   __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
   590   __ jmp(rcx);
   591 }
   593 static void gen_i2c_adapter(MacroAssembler *masm,
   594                             int total_args_passed,
   595                             int comp_args_on_stack,
   596                             const BasicType *sig_bt,
   597                             const VMRegPair *regs) {
    599   // Note: r13 contains the senderSP on entry. We must preserve it since
    600   // we may do an i2c -> c2i transition if we lose a race where compiled
    601   // code goes non-entrant while we get args ready.
    602   // In addition we use r13 to locate all the interpreter args because
    603   // we must align the stack to 16 bytes on an i2c entry; otherwise we
    604   // lose the alignment expected by all compiled code, and the register
    605   // save code can segv when fxsave instructions find an improperly
    606   // aligned stack pointer.
   608   // Pick up the return address
   609   __ movptr(rax, Address(rsp, 0));
   611   // Must preserve original SP for loading incoming arguments because
   612   // we need to align the outgoing SP for compiled code.
   613   __ movptr(r11, rsp);
    615   // Cut-out for having no stack args.  Since up to 6 int/oop args are passed
    616   // in registers, we will occasionally have no stack args.
   617   int comp_words_on_stack = 0;
   618   if (comp_args_on_stack) {
   619     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
   620     // registers are below.  By subtracting stack0, we either get a negative
   621     // number (all values in registers) or the maximum stack slot accessed.
   623     // Convert 4-byte c2 stack slots to words.
   624     comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    625     // Round up to minimum stack alignment, in wordSize
   626     comp_words_on_stack = round_to(comp_words_on_stack, 2);
   627     __ subptr(rsp, comp_words_on_stack * wordSize);
   628   }
   631   // Ensure compiled code always sees stack at proper alignment
   632   __ andptr(rsp, -16);
    634   // push the return address and misalign the stack so that the youngest frame sees it
    635   // just as it would after the placement of a call instruction
   636   __ push(rax);
   638   // Put saved SP in another register
   639   const Register saved_sp = rax;
   640   __ movptr(saved_sp, r11);
   642   // Will jump to the compiled code just as if compiled code was doing it.
   643   // Pre-load the register-jump target early, to schedule it better.
   644   __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
   646   // Now generate the shuffle code.  Pick up all register args and move the
   647   // rest through the floating point stack top.
   648   for (int i = 0; i < total_args_passed; i++) {
   649     if (sig_bt[i] == T_VOID) {
   650       // Longs and doubles are passed in native word order, but misaligned
   651       // in the 32-bit build.
   652       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
   653       continue;
   654     }
   656     // Pick up 0, 1 or 2 words from SP+offset.
   658     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
   659             "scrambled load targets?");
   660     // Load in argument order going down.
   661     int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
   662     // Point to interpreter value (vs. tag)
   663     int next_off = ld_off - Interpreter::stackElementSize;
   664     //
   665     //
   666     //
   667     VMReg r_1 = regs[i].first();
   668     VMReg r_2 = regs[i].second();
   669     if (!r_1->is_valid()) {
   670       assert(!r_2->is_valid(), "");
   671       continue;
   672     }
   673     if (r_1->is_stack()) {
   674       // Convert stack slot to an SP offset (+ wordSize to account for return address )
   675       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
   677       // We can use r13 as a temp here because compiled code doesn't need r13 as an input
   678       // and if we end up going thru a c2i because of a miss a reasonable value of r13
   679       // will be generated.
   680       if (!r_2->is_valid()) {
   681         // sign extend???
   682         __ movl(r13, Address(saved_sp, ld_off));
   683         __ movptr(Address(rsp, st_off), r13);
   684       } else {
   685         //
   686         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
    687     // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
   688         // So we must adjust where to pick up the data to match the interpreter.
   689         //
   690         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
   691         // are accessed as negative so LSW is at LOW address
   693         // ld_off is MSW so get LSW
   694         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
   695                            next_off : ld_off;
   696         __ movq(r13, Address(saved_sp, offset));
   697         // st_off is LSW (i.e. reg.first())
   698         __ movq(Address(rsp, st_off), r13);
   699       }
   700     } else if (r_1->is_Register()) {  // Register argument
   701       Register r = r_1->as_Register();
   702       assert(r != rax, "must be different");
   703       if (r_2->is_valid()) {
   704         //
   705         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
    706     // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
   707         // So we must adjust where to pick up the data to match the interpreter.
   709         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
   710                            next_off : ld_off;
   712         // this can be a misaligned move
   713         __ movq(r, Address(saved_sp, offset));
   714       } else {
   715         // sign extend and use a full word?
   716         __ movl(r, Address(saved_sp, ld_off));
   717       }
   718     } else {
   719       if (!r_2->is_valid()) {
   720         __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
   721       } else {
   722         __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
   723       }
   724     }
   725   }
   727   // 6243940 We might end up in handle_wrong_method if
   728   // the callee is deoptimized as we race thru here. If that
   729   // happens we don't want to take a safepoint because the
   730   // caller frame will look interpreted and arguments are now
   731   // "compiled" so it is much better to make this transition
   732   // invisible to the stack walking code. Unfortunately if
   733   // we try and find the callee by normal means a safepoint
   734   // is possible. So we stash the desired callee in the thread
    735   // and the vm will find it there should this case occur.
   737   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
   739   // put methodOop where a c2i would expect should we end up there
    740   // only needed because c2 resolve stubs return methodOop as a result in
   741   // rax
   742   __ mov(rax, rbx);
   743   __ jmp(r11);
   744 }
   746 // ---------------------------------------------------------------
   747 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
   748                                                             int total_args_passed,
   749                                                             int comp_args_on_stack,
   750                                                             const BasicType *sig_bt,
   751                                                             const VMRegPair *regs,
   752                                                             AdapterFingerPrint* fingerprint) {
   753   address i2c_entry = __ pc();
   755   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
   757   // -------------------------------------------------------------------------
   758   // Generate a C2I adapter.  On entry we know rbx holds the methodOop during calls
   759   // to the interpreter.  The args start out packed in the compiled layout.  They
   760   // need to be unpacked into the interpreter layout.  This will almost always
   761   // require some stack space.  We grow the current (compiled) stack, then repack
   762   // the args.  We  finally end in a jump to the generic interpreter entry point.
   763   // On exit from the interpreter, the interpreter will restore our SP (lest the
    764   // compiled code, which relies solely on SP and not RBP, get sick).
   766   address c2i_unverified_entry = __ pc();
   767   Label skip_fixup;
   768   Label ok;
   770   Register holder = rax;
   771   Register receiver = j_rarg0;
   772   Register temp = rbx;
   774   {
   775     __ verify_oop(holder);
   776     __ load_klass(temp, receiver);
   777     __ verify_oop(temp);
   779     __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
   780     __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
   781     __ jcc(Assembler::equal, ok);
   782     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   784     __ bind(ok);
    785     // Method might have been compiled since the call site was patched to
    786     // interpreted; if that is the case treat it as a miss so we can get
    787     // the call site corrected.
   788     __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
   789     __ jcc(Assembler::equal, skip_fixup);
   790     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   791   }
   793   address c2i_entry = __ pc();
   795   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
   797   __ flush();
   798   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
   799 }
   801 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
   802                                          VMRegPair *regs,
   803                                          int total_args_passed) {
   804 // We return the amount of VMRegImpl stack slots we need to reserve for all
   805 // the arguments NOT counting out_preserve_stack_slots.
   807 // NOTE: These arrays will have to change when c1 is ported
   808 #ifdef _WIN64
   809     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
   810       c_rarg0, c_rarg1, c_rarg2, c_rarg3
   811     };
   812     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
   813       c_farg0, c_farg1, c_farg2, c_farg3
   814     };
   815 #else
   816     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
   817       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
   818     };
   819     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
   820       c_farg0, c_farg1, c_farg2, c_farg3,
   821       c_farg4, c_farg5, c_farg6, c_farg7
   822     };
   823 #endif // _WIN64
   826     uint int_args = 0;
   827     uint fp_args = 0;
   828     uint stk_args = 0; // inc by 2 each time
   830     for (int i = 0; i < total_args_passed; i++) {
   831       switch (sig_bt[i]) {
   832       case T_BOOLEAN:
   833       case T_CHAR:
   834       case T_BYTE:
   835       case T_SHORT:
   836       case T_INT:
   837         if (int_args < Argument::n_int_register_parameters_c) {
   838           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
   839 #ifdef _WIN64
   840           fp_args++;
    841           // Allocate slots for callee to stuff register args on the stack.
   842           stk_args += 2;
   843 #endif
   844         } else {
   845           regs[i].set1(VMRegImpl::stack2reg(stk_args));
   846           stk_args += 2;
   847         }
   848         break;
   849       case T_LONG:
   850         assert(sig_bt[i + 1] == T_VOID, "expecting half");
   851         // fall through
   852       case T_OBJECT:
   853       case T_ARRAY:
   854       case T_ADDRESS:
   855         if (int_args < Argument::n_int_register_parameters_c) {
   856           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
   857 #ifdef _WIN64
   858           fp_args++;
   859           stk_args += 2;
   860 #endif
   861         } else {
   862           regs[i].set2(VMRegImpl::stack2reg(stk_args));
   863           stk_args += 2;
   864         }
   865         break;
   866       case T_FLOAT:
   867         if (fp_args < Argument::n_float_register_parameters_c) {
   868           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
   869 #ifdef _WIN64
   870           int_args++;
    871           // Allocate slots for callee to stuff register args on the stack.
   872           stk_args += 2;
   873 #endif
   874         } else {
   875           regs[i].set1(VMRegImpl::stack2reg(stk_args));
   876           stk_args += 2;
   877         }
   878         break;
   879       case T_DOUBLE:
   880         assert(sig_bt[i + 1] == T_VOID, "expecting half");
   881         if (fp_args < Argument::n_float_register_parameters_c) {
   882           regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
   883 #ifdef _WIN64
   884           int_args++;
    885           // Allocate slots for callee to stuff register args on the stack.
   886           stk_args += 2;
   887 #endif
   888         } else {
   889           regs[i].set2(VMRegImpl::stack2reg(stk_args));
   890           stk_args += 2;
   891         }
   892         break;
   893       case T_VOID: // Halves of longs and doubles
   894         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
   895         regs[i].set_bad();
   896         break;
   897       default:
   898         ShouldNotReachHere();
   899         break;
   900       }
   901     }
   902 #ifdef _WIN64
   903   // windows abi requires that we always allocate enough stack space
   904   // for 4 64bit registers to be stored down.
   905   if (stk_args < 8) {
   906     stk_args = 8;
   907   }
   908 #endif // _WIN64
   910   return stk_args;
   911 }
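// Editorial example (illustration only, not from the original source): for a
// native signature of (jint, jdouble, jobject) the loop above yields
//   SysV  : c_rarg0, c_farg0, c_rarg1          stk_args = 0
//   Win64 : c_rarg0, c_farg1, c_rarg2          stk_args = 8 (shadow space)
// because on Win64 the int and fp counters advance in lock step and the final
// check reserves the mandatory 4-register home area.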
    913 // On 64 bit we will store integer-like items to the stack as
    914 // 64 bit items (sparc abi) even though java would only store
    915 // 32 bits for a parameter. On 32 bit it will simply be 32 bits.
    916 // So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
   917 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
   918   if (src.first()->is_stack()) {
   919     if (dst.first()->is_stack()) {
   920       // stack to stack
   921       __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
   922       __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
   923     } else {
   924       // stack to reg
   925       __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
   926     }
   927   } else if (dst.first()->is_stack()) {
   928     // reg to stack
   929     // Do we really have to sign extend???
   930     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
   931     __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
   932   } else {
   933     // Do we really have to sign extend???
   934     // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
   935     if (dst.first() != src.first()) {
   936       __ movq(dst.first()->as_Register(), src.first()->as_Register());
   937     }
   938   }
   939 }
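// Editorial note: the asymmetry above is intentional as written: a stack
// source is widened with movslq so the destination holds a proper 64-bit
// value, while a register source is copied with a plain movq (the commented
// out movslq lines record that the need to sign extend was left an open
// question).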
   942 // An oop arg. Must pass a handle not the oop itself
   943 static void object_move(MacroAssembler* masm,
   944                         OopMap* map,
   945                         int oop_handle_offset,
   946                         int framesize_in_slots,
   947                         VMRegPair src,
   948                         VMRegPair dst,
   949                         bool is_receiver,
   950                         int* receiver_offset) {
   952   // must pass a handle. First figure out the location we use as a handle
   954   Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
   956   // See if oop is NULL if it is we need no handle
   958   if (src.first()->is_stack()) {
   960     // Oop is already on the stack as an argument
   961     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
   962     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
   963     if (is_receiver) {
   964       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
   965     }
   967     __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
   968     __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
   969     // conditionally move a NULL
   970     __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
   971   } else {
    973     // Oop is in a register; we must store it to the space we reserve
   974     // on the stack for oop_handles and pass a handle if oop is non-NULL
   976     const Register rOop = src.first()->as_Register();
   977     int oop_slot;
   978     if (rOop == j_rarg0)
   979       oop_slot = 0;
   980     else if (rOop == j_rarg1)
   981       oop_slot = 1;
   982     else if (rOop == j_rarg2)
   983       oop_slot = 2;
   984     else if (rOop == j_rarg3)
   985       oop_slot = 3;
   986     else if (rOop == j_rarg4)
   987       oop_slot = 4;
   988     else {
   989       assert(rOop == j_rarg5, "wrong register");
   990       oop_slot = 5;
   991     }
   993     oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
   994     int offset = oop_slot*VMRegImpl::stack_slot_size;
   996     map->set_oop(VMRegImpl::stack2reg(oop_slot));
   997     // Store oop in handle area, may be NULL
   998     __ movptr(Address(rsp, offset), rOop);
   999     if (is_receiver) {
   1000       *receiver_offset = offset;
   1001     }
  1003     __ cmpptr(rOop, (int32_t)NULL_WORD);
  1004     __ lea(rHandle, Address(rsp, offset));
  1005     // conditionally move a NULL from the handle area where it was just stored
   1006     __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
   1007   }
  1009   // If arg is on the stack then place it otherwise it is already in correct reg.
  1010   if (dst.first()->is_stack()) {
   1011     __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
   1012   }
   1013 }
  1015 // A float arg may have to do float reg int reg conversion
  1016 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1017   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
   1019   // The calling convention assures us that each VMRegPair is either
  1020   // all really one physical register or adjacent stack slots.
  1021   // This greatly simplifies the cases here compared to sparc.
  1023   if (src.first()->is_stack()) {
  1024     if (dst.first()->is_stack()) {
  1025       __ movl(rax, Address(rbp, reg2offset_in(src.first())));
  1026       __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  1027     } else {
  1028       // stack to reg
  1029       assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
   1030       __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
   1031     }
  1032   } else if (dst.first()->is_stack()) {
  1033     // reg to stack
  1034     assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
  1035     __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  1036   } else {
  1037     // reg to reg
  1038     // In theory these overlap but the ordering is such that this is likely a nop
  1039     if ( src.first() != dst.first()) {
   1040       __ movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
   1041     }
   1042   }
   1043 }
  1045 // A long move
  1046 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
   1048   // The calling convention assures us that each VMRegPair is either
  1049   // all really one physical register or adjacent stack slots.
  1050   // This greatly simplifies the cases here compared to sparc.
  1052   if (src.is_single_phys_reg() ) {
  1053     if (dst.is_single_phys_reg()) {
  1054       if (dst.first() != src.first()) {
   1055         __ mov(dst.first()->as_Register(), src.first()->as_Register());
   1056       }
  1057     } else {
  1058       assert(dst.is_single_reg(), "not a stack pair");
   1059       __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
   1060     }
  1061   } else if (dst.is_single_phys_reg()) {
  1062     assert(src.is_single_reg(),  "not a stack pair");
  1063     __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  1064   } else {
  1065     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  1066     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
   1067     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
   1068   }
   1069 }
  1071 // A double move
  1072 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
   1074   // The calling convention assures us that each VMRegPair is either
  1075   // all really one physical register or adjacent stack slots.
  1076   // This greatly simplifies the cases here compared to sparc.
  1078   if (src.is_single_phys_reg() ) {
  1079     if (dst.is_single_phys_reg()) {
  1080       // In theory these overlap but the ordering is such that this is likely a nop
  1081       if ( src.first() != dst.first()) {
   1082         __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
   1083       }
  1084     } else {
  1085       assert(dst.is_single_reg(), "not a stack pair");
   1086       __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
   1087     }
  1088   } else if (dst.is_single_phys_reg()) {
  1089     assert(src.is_single_reg(),  "not a stack pair");
  1090     __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  1091   } else {
  1092     assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
  1093     __ movq(rax, Address(rbp, reg2offset_in(src.first())));
   1094     __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
   1095   }
   1096 }
  1099 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  1100   // We always ignore the frame_slots arg and just use the space just below frame pointer
  1101   // which by this time is free to use
  1102   switch (ret_type) {
  1103   case T_FLOAT:
  1104     __ movflt(Address(rbp, -wordSize), xmm0);
  1105     break;
  1106   case T_DOUBLE:
  1107     __ movdbl(Address(rbp, -wordSize), xmm0);
  1108     break;
  1109   case T_VOID:  break;
  1110   default: {
   1111     __ movptr(Address(rbp, -wordSize), rax);
   1112     }
   1113   }
   1114 }
  1116 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  1117   // We always ignore the frame_slots arg and just use the space just below frame pointer
  1118   // which by this time is free to use
  1119   switch (ret_type) {
  1120   case T_FLOAT:
  1121     __ movflt(xmm0, Address(rbp, -wordSize));
  1122     break;
  1123   case T_DOUBLE:
  1124     __ movdbl(xmm0, Address(rbp, -wordSize));
  1125     break;
  1126   case T_VOID:  break;
  1127   default: {
   1128     __ movptr(rax, Address(rbp, -wordSize));
   1129     }
   1130   }
   1131 }
  1133 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  1134     for ( int i = first_arg ; i < arg_count ; i++ ) {
  1135       if (args[i].first()->is_Register()) {
  1136         __ push(args[i].first()->as_Register());
  1137       } else if (args[i].first()->is_XMMRegister()) {
  1138         __ subptr(rsp, 2*wordSize);
   1139         __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
   1140       }
   1141     }
   1142 }
  1144 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  1145     for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
  1146       if (args[i].first()->is_Register()) {
  1147         __ pop(args[i].first()->as_Register());
  1148       } else if (args[i].first()->is_XMMRegister()) {
  1149         __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
   1150         __ addptr(rsp, 2*wordSize);
   1151       }
   1152     }
   1153 }
  1155 // ---------------------------------------------------------------------------
  1156 // Generate a native wrapper for a given method.  The method takes arguments
  1157 // in the Java compiled code convention, marshals them to the native
  1158 // convention (handlizes oops, etc), transitions to native, makes the call,
  1159 // returns to java state (possibly blocking), unhandlizes any result and
  1160 // returns.
  1161 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  1162                                                 methodHandle method,
  1163                                                 int compile_id,
  1164                                                 int total_in_args,
  1165                                                 int comp_args_on_stack,
  1166                                                 BasicType *in_sig_bt,
  1167                                                 VMRegPair *in_regs,
  1168                                                 BasicType ret_type) {
   1169   // Native nmethod wrappers never take possession of the oop arguments.
  1170   // So the caller will gc the arguments. The only thing we need an
  1171   // oopMap for is if the call is static
  1172   //
  1173   // An OopMap for lock (and class if static)
  1174   OopMapSet *oop_maps = new OopMapSet();
  1175   intptr_t start = (intptr_t)__ pc();
   1177   // We have received a description of where all the java args are located
  1178   // on entry to the wrapper. We need to convert these args to where
  1179   // the jni function will expect them. To figure out where they go
  1180   // we convert the java signature to a C signature by inserting
  1181   // the hidden arguments as arg[0] and possibly arg[1] (static method)
  1183   int total_c_args = total_in_args + 1;
  1184   if (method->is_static()) {
   1185     total_c_args++;
   1186   }
  1188   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  1189   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
  1191   int argc = 0;
  1192   out_sig_bt[argc++] = T_ADDRESS;
  1193   if (method->is_static()) {
   1194     out_sig_bt[argc++] = T_OBJECT;
   1195   }
  1197   for (int i = 0; i < total_in_args ; i++ ) {
   1198     out_sig_bt[argc++] = in_sig_bt[i];
   1199   }
  1201   // Now figure out where the args must be stored and how much stack space
  1202   // they require.
  1203   //
  1204   int out_arg_slots;
  1205   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
  1207   // Compute framesize for the wrapper.  We need to handlize all oops in
  1208   // incoming registers
  1210   // Calculate the total number of stack slots we will need.
  1212   // First count the abi requirement plus all of the outgoing args
  1213   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
  1215   // Now the space for the inbound oop handle area
  1217   int oop_handle_offset = stack_slots;
  1218   stack_slots += 6*VMRegImpl::slots_per_word;
  1220   // Now any space we need for handlizing a klass if this is a static method
  1222   int oop_temp_slot_offset = 0;
  1223   int klass_slot_offset = 0;
  1224   int klass_offset = -1;
  1225   int lock_slot_offset = 0;
  1226   bool is_static = false;
  1228   if (method->is_static()) {
  1229     klass_slot_offset = stack_slots;
  1230     stack_slots += VMRegImpl::slots_per_word;
  1231     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
  1232     is_static = true;
  1235   // Plus a lock if needed
  1237   if (method->is_synchronized()) {
  1238     lock_slot_offset = stack_slots;
  1239     stack_slots += VMRegImpl::slots_per_word;
  1242   // Now a place (+2) to save return values or temp during shuffling
  1243   // + 4 for return address (which we own) and saved rbp
  1244   stack_slots += 6;
  1246   // OK, the space we have allocated will look like:
  1247   //
  1248   //
  1249   // FP-> |                     |
  1250   //      |---------------------|
  1251   //      | 2 slots for moves   |
  1252   //      |---------------------|
  1253   //      | lock box (if sync)  |
  1254   //      |---------------------| <- lock_slot_offset
  1255   //      | klass (if static)   |
  1256   //      |---------------------| <- klass_slot_offset
  1257   //      | oopHandle area      |
  1258   //      |---------------------| <- oop_handle_offset (6 java arg registers)
  1259   //      | outbound memory     |
  1260   //      | based arguments     |
  1261   //      |                     |
  1262   //      |---------------------|
  1263   //      |                     |
  1264   // SP-> | out_preserved_slots |
  1265   //
  1266   //
  1269   // Now compute the actual number of stack words we need, rounding to keep
  1270   // the stack properly aligned.
  1271   stack_slots = round_to(stack_slots, StackAlignmentInSlots);
  1273   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
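         // A rough worked example (numbers are hypothetical): with 4-byte stack
         // slots and 16-byte stack alignment, StackAlignmentInSlots is 4.  If the
         // accounting above yielded stack_slots == 45, round_to(45, 4) gives 48
         // slots, i.e. stack_size == 192 bytes, so rsp stays 16-byte aligned once
         // the frame below is set up.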
  1276   // First thing, make an IC check to see if we should even be here
  1278   // We are free to use all registers as temps without saving them and
  1279   // restoring them except rbp. rbp is the only callee save register
  1280   // as far as the interpreter and the compiler(s) are concerned.
  1283   const Register ic_reg = rax;
  1284   const Register receiver = j_rarg0;
  1286   Label ok;
  1287   Label exception_pending;
  1289   assert_different_registers(ic_reg, receiver, rscratch1);
  1290   __ verify_oop(receiver);
  1291   __ load_klass(rscratch1, receiver);
  1292   __ cmpq(ic_reg, rscratch1);
  1293   __ jcc(Assembler::equal, ok);
  1295   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  1297   __ bind(ok);
  1299   // Verified entry point must be aligned
  1300   __ align(8);
  1302   int vep_offset = ((intptr_t)__ pc()) - start;
  1304   // The instruction at the verified entry point must be 5 bytes or longer
  1305   // because it can be patched on the fly by make_non_entrant. The stack bang
  1306   // instruction fits that requirement.
  1308   // Generate stack overflow check
  1310   if (UseStackBanging) {
  1311     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
  1312   } else {
  1313     // need a 5 byte instruction to allow MT safe patching to non-entrant
  1314     __ fat_nop();
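         // Either way the patchable first instruction is at least 5 bytes: the
         // bang is a store with a 32-bit displacement below rsp (so a stack
         // overflow is taken up front rather than somewhere inside the native
         // call), and the fat_nop fallback is a 5-byte nop.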
  1317   // Generate a new frame for the wrapper.
  1318   __ enter();
  1319   // -2 because return address is already present and so is saved rbp
  1320   __ subptr(rsp, stack_size - 2*wordSize);
  1322     // Frame is now completed as far as size and linkage.
  1324     int frame_complete = ((intptr_t)__ pc()) - start;
  1326 #ifdef ASSERT
  1328       Label L;
  1329       __ mov(rax, rsp);
  1330       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
  1331       __ cmpptr(rax, rsp);
  1332       __ jcc(Assembler::equal, L);
  1333       __ stop("improperly aligned stack");
  1334       __ bind(L);
  1336 #endif /* ASSERT */
  1339   // We use r14 as the oop handle for the receiver/klass
  1340   // It is callee save so it survives the call to native
  1342   const Register oop_handle_reg = r14;
  1346   //
  1347   // We immediately shuffle the arguments so that for any vm call we have to
  1348   // make from here on out (sync slow path, jvmti, etc.) we will have
  1349   // captured the oops from our caller and have a valid oopMap for
  1350   // them.
  1352   // -----------------
  1353   // The Grand Shuffle
  1355   // The Java calling convention is either equal to (Linux) or denser than (Win64) the
  1356   // C calling convention. However, because of the jni_env argument the C calling
  1357   // convention always has at least one more argument (two for static methods) than Java.
  1358   // Therefore if we move the args from java -> c backwards then we will never have
  1359   // a register->register conflict and we don't have to build a dependency graph
  1360   // and figure out how to break any cycles.
  1361   //
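         // A small sketch of why the backward walk is safe (a plausible static
         // call case): java arg 1 sits in rdx with C home rcx, while java arg 2
         // sits in rcx with C home r8.  Moving the later arg first (rcx -> r8)
         // frees rcx before the earlier move (rdx -> rcx) needs it as a
         // destination, so no source is clobbered before it has been read.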
  1363   // Record esp-based slot for receiver on stack for non-static methods
  1364   int receiver_offset = -1;
  1366   // This is a trick. We double the stack slots so we can claim
  1367   // the oops in the caller's frame. Since we are sure to have
  1368   // more args than the caller, doubling is enough to make
  1369   // sure we can capture all the incoming oop args from the
  1370   // caller.
  1371   //
  1372   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  1374   // Mark location of rbp (someday)
  1375   // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
  1377   // Use eax, ebx as temporaries during any memory-memory moves we have to do
  1378   // All inbound args are referenced based on rbp and all outbound args via rsp.
  1381 #ifdef ASSERT
  1382   bool reg_destroyed[RegisterImpl::number_of_registers];
  1383   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
  1384   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
  1385     reg_destroyed[r] = false;
  1387   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
  1388     freg_destroyed[f] = false;
  1391 #endif /* ASSERT */
  1394   int c_arg = total_c_args - 1;
  1395   for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
  1396 #ifdef ASSERT
  1397     if (in_regs[i].first()->is_Register()) {
  1398       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
  1399     } else if (in_regs[i].first()->is_XMMRegister()) {
  1400       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
  1402     if (out_regs[c_arg].first()->is_Register()) {
  1403       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
  1404     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
  1405       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
  1407 #endif /* ASSERT */
  1408     switch (in_sig_bt[i]) {
  1409       case T_ARRAY:
  1410       case T_OBJECT:
  1411         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
  1412                     ((i == 0) && (!is_static)),
  1413                     &receiver_offset);
  1414         break;
  1415       case T_VOID:
  1416         break;
  1418       case T_FLOAT:
  1419         float_move(masm, in_regs[i], out_regs[c_arg]);
  1420           break;
  1422       case T_DOUBLE:
  1423         assert( i + 1 < total_in_args &&
  1424                 in_sig_bt[i + 1] == T_VOID &&
  1425                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
  1426         double_move(masm, in_regs[i], out_regs[c_arg]);
  1427         break;
  1429       case T_LONG :
  1430         long_move(masm, in_regs[i], out_regs[c_arg]);
  1431         break;
  1433       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
  1435       default:
  1436         move32_64(masm, in_regs[i], out_regs[c_arg]);
  1440   // point c_arg at the first arg that is already loaded in case we
  1441   // need to spill before we call out
  1442   c_arg++;
  1444   // Pre-load a static method's oop into r14.  Used both by locking code and
  1445   // the normal JNI call code.
  1446   if (method->is_static()) {
  1448     //  load oop into a register
  1449     __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
  1451     // Now handlize the static class mirror; it's known not-null.
  1452     __ movptr(Address(rsp, klass_offset), oop_handle_reg);
  1453     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
  1455     // Now get the handle
  1456     __ lea(oop_handle_reg, Address(rsp, klass_offset));
  1457     // store the klass handle as second argument
  1458     __ movptr(c_rarg1, oop_handle_reg);
  1459     // and protect the arg if we must spill
  1460     c_arg--;
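           // The mirror is now handlized: the oop itself lives in the stack slot
           // at [rsp + klass_offset] (and is recorded in the oopMap above), while
           // c_rarg1 holds the address of that slot, which is the jclass handle
           // the native code expects.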
  1463   // Change state to native (we save the return address in the thread, since it might not
  1464   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  1465   // points into the right code segment. It does not have to be the correct return pc.
  1466   // We use the same pc/oopMap repeatedly when we call out
  1468   intptr_t the_pc = (intptr_t) __ pc();
  1469   oop_maps->add_gc_map(the_pc - start, map);
  1471   __ set_last_Java_frame(rsp, noreg, (address)the_pc);
  1474   // We have all of the arguments set up at this point. We must not touch any of the
  1475   // argument registers at this point (if we save/restore them, there is no oopMap covering them).
  1478     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  1479     // protect the args we've loaded
  1480     save_args(masm, total_c_args, c_arg, out_regs);
  1481     __ movoop(c_rarg1, JNIHandles::make_local(method()));
  1482     __ call_VM_leaf(
  1483       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
  1484       r15_thread, c_rarg1);
  1485     restore_args(masm, total_c_args, c_arg, out_regs);
  1488   // RedefineClasses() tracing support for obsolete method entry
  1489   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
  1490     // protect the args we've loaded
  1491     save_args(masm, total_c_args, c_arg, out_regs);
  1492     __ movoop(c_rarg1, JNIHandles::make_local(method()));
  1493     __ call_VM_leaf(
  1494       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
  1495       r15_thread, c_rarg1);
  1496     restore_args(masm, total_c_args, c_arg, out_regs);
  1499   // Lock a synchronized method
  1501   // Register definitions used by locking and unlocking
  1503   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
  1504   const Register obj_reg  = rbx;  // Will contain the oop
  1505   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
  1506   const Register old_hdr  = r13;  // value of old header at unlock time
  1508   Label slow_path_lock;
  1509   Label lock_done;
  1511   if (method->is_synchronized()) {
  1514     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
  1516     // Get the handle (the 2nd argument)
  1517     __ mov(oop_handle_reg, c_rarg1);
  1519     // Get address of the box
  1521     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
  1523     // Load the oop from the handle
  1524     __ movptr(obj_reg, Address(oop_handle_reg, 0));
  1526     if (UseBiasedLocking) {
  1527       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
  1530     // Load immediate 1 into swap_reg %rax
  1531     __ movl(swap_reg, 1);
  1533     // Load (object->mark() | 1) into swap_reg %rax
  1534     __ orptr(swap_reg, Address(obj_reg, 0));
  1536     // Save (object->mark() | 1) into BasicLock's displaced header
  1537     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
  1539     if (os::is_MP()) {
  1540       __ lock();
  1543     // src -> dest iff dest == rax else rax <- dest
  1544     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
  1545     __ jcc(Assembler::equal, lock_done);
  1547     // Hmm should this move to the slow path code area???
  1549     // Test if the oopMark is an obvious stack pointer, i.e.,
  1550     //  1) (mark & 3) == 0, and
  1551     //  2) rsp <= mark < rsp + os::vm_page_size()
  1552     // These 3 tests can be done by evaluating the following
  1553     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
  1554     // assuming both stack pointer and pagesize have their
  1555     // least significant 2 bits clear.
  1556     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
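           // A worked example (assuming a hypothetical 4K page, so
           // os::vm_page_size() == 0x1000): 3 - 0x1000 == -0xffd, which as a mask
           // keeps bits 0-1 and bits 12 and up.  When the mark is a pointer into
           // our own frame, mark - rsp is a small, 4-byte-aligned, non-negative
           // value, so every masked bit is zero and the result is 0 (recursive
           // lock); anything else leaves a non-zero result and we take the slow
           // path below.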
  1558     __ subptr(swap_reg, rsp);
  1559     __ andptr(swap_reg, 3 - os::vm_page_size());
  1561     // Save the test result, for recursive case, the result is zero
  1562     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
  1563     __ jcc(Assembler::notEqual, slow_path_lock);
  1565     // Slow path will re-enter here
  1567     __ bind(lock_done);
  1571   // Finally just about ready to make the JNI call
  1574   // get JNIEnv* which is first argument to native
  1576   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
  1578   // Now set thread in native
  1579   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
  1581   __ call(RuntimeAddress(method->native_function()));
  1583     // Either restore the MXCSR register after returning from the JNI Call
  1584     // or verify that it wasn't changed.
  1585     if (RestoreMXCSROnJNICalls) {
  1586       __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
  1589     else if (CheckJNICalls ) {
  1590       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
  1594   // Unpack native results.
  1595   switch (ret_type) {
  1596   case T_BOOLEAN: __ c2bool(rax);            break;
  1597   case T_CHAR   : __ movzwl(rax, rax);      break;
  1598   case T_BYTE   : __ sign_extend_byte (rax); break;
  1599   case T_SHORT  : __ sign_extend_short(rax); break;
  1600   case T_INT    : /* nothing to do */        break;
  1601   case T_DOUBLE :
  1602   case T_FLOAT  :
  1603     // Result is in xmm0; we'll save as needed
  1604     break;
  1605   case T_ARRAY:                 // Really a handle
  1606   case T_OBJECT:                // Really a handle
  1607       break; // can't de-handlize until after safepoint check
  1608   case T_VOID: break;
  1609   case T_LONG: break;
  1610   default       : ShouldNotReachHere();
  1613   // Switch thread to "native transition" state before reading the synchronization state.
  1614   // This additional state is necessary because reading and testing the synchronization
  1615   // state is not atomic w.r.t. GC, as this scenario demonstrates:
  1616   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  1617   //     VM thread changes sync state to synchronizing and suspends threads for GC.
  1618   //     Thread A is resumed to finish this native method, but doesn't block here since it
  1619   //     didn't see any synchronization in progress, and escapes.
  1620   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  1622   if(os::is_MP()) {
  1623     if (UseMembar) {
  1624       // Force this write out before the read below
  1625       __ membar(Assembler::Membar_mask_bits(
  1626            Assembler::LoadLoad | Assembler::LoadStore |
  1627            Assembler::StoreLoad | Assembler::StoreStore));
  1628     } else {
  1629       // Write serialization page so VM thread can do a pseudo remote membar.
  1630       // We use the current thread pointer to calculate a thread specific
  1631       // offset to write to within the page. This minimizes bus traffic
  1632       // due to cache line collision.
  1633       __ serialize_memory(r15_thread, rcx);
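             // The store lands at a per-thread offset inside a dedicated page;
             // when the VM thread protects that page during a safepoint the
             // store faults, which serves as the missing StoreLoad barrier for
             // this thread.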
  1638   // check for safepoint operation in progress and/or pending suspend requests
  1640     Label Continue;
  1642     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
  1643              SafepointSynchronize::_not_synchronized);
  1645     Label L;
  1646     __ jcc(Assembler::notEqual, L);
  1647     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
  1648     __ jcc(Assembler::equal, Continue);
  1649     __ bind(L);
  1651     // Don't use call_VM as it will see a possible pending exception and forward it
  1652     // and never return here preventing us from clearing _last_native_pc down below.
  1653     // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
  1654     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
  1655     // by hand.
  1656     //
  1657     save_native_result(masm, ret_type, stack_slots);
  1658     __ mov(c_rarg0, r15_thread);
  1659     __ mov(r12, rsp); // remember sp
  1660     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  1661     __ andptr(rsp, -16); // align stack as required by ABI
  1662     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  1663     __ mov(rsp, r12); // restore sp
  1664     __ reinit_heapbase();
  1665     // Restore any method result value
  1666     restore_native_result(masm, ret_type, stack_slots);
  1667     __ bind(Continue);
  1670   // change thread state
  1671   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
  1673   Label reguard;
  1674   Label reguard_done;
  1675   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
  1676   __ jcc(Assembler::equal, reguard);
  1677   __ bind(reguard_done);
  1679   // native result if any is live
  1681   // Unlock
  1682   Label unlock_done;
  1683   Label slow_path_unlock;
  1684   if (method->is_synchronized()) {
  1686     // Get locked oop from the handle we passed to jni
  1687     __ movptr(obj_reg, Address(oop_handle_reg, 0));
  1689     Label done;
  1691     if (UseBiasedLocking) {
  1692       __ biased_locking_exit(obj_reg, old_hdr, done);
  1695     // Simple recursive lock?
  1697     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
  1698     __ jcc(Assembler::equal, done);
  1700     // Must save rax if it is live now because cmpxchg must use it
  1701     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
  1702       save_native_result(masm, ret_type, stack_slots);
  1706     // get address of the stack lock
  1707     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
  1708     //  get old displaced header
  1709     __ movptr(old_hdr, Address(rax, 0));
  1711     // Atomic swap old header if oop still contains the stack lock
  1712     if (os::is_MP()) {
  1713       __ lock();
  1715     __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
  1716     __ jcc(Assembler::notEqual, slow_path_unlock);
  1718     // slow path re-enters here
  1719     __ bind(unlock_done);
  1720     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
  1721       restore_native_result(masm, ret_type, stack_slots);
  1724     __ bind(done);
  1728     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  1729     save_native_result(masm, ret_type, stack_slots);
  1730     __ movoop(c_rarg1, JNIHandles::make_local(method()));
  1731     __ call_VM_leaf(
  1732          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
  1733          r15_thread, c_rarg1);
  1734     restore_native_result(masm, ret_type, stack_slots);
  1737   __ reset_last_Java_frame(false, true);
  1739   // Unpack oop result
  1740   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
  1741       Label L;
  1742       __ testptr(rax, rax);
  1743       __ jcc(Assembler::zero, L);
  1744       __ movptr(rax, Address(rax, 0));
  1745       __ bind(L);
  1746       __ verify_oop(rax);
  1749   // reset handle block
  1750   __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
  1751   __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
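         // Storing NULL into the block's top offset discards, in one step, all of
         // the local JNI handles created during the native call; they are dead
         // now that any oop result has been unpacked above.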
  1753   // pop our frame
  1755   __ leave();
  1757   // Any exception pending?
  1758   __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  1759   __ jcc(Assembler::notEqual, exception_pending);
  1761   // Return
  1763   __ ret(0);
  1765   // Unexpected paths are out of line and go here
  1767   // forward the exception
  1768   __ bind(exception_pending);
  1770   // and forward the exception
  1771   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  1774   // Slow path locking & unlocking
  1775   if (method->is_synchronized()) {
  1777     // BEGIN Slow path lock
  1778     __ bind(slow_path_lock);
  1780     // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
  1781     // args are (oop obj, BasicLock* lock, JavaThread* thread)
  1783     // protect the args we've loaded
  1784     save_args(masm, total_c_args, c_arg, out_regs);
  1786     __ mov(c_rarg0, obj_reg);
  1787     __ mov(c_rarg1, lock_reg);
  1788     __ mov(c_rarg2, r15_thread);
  1790     // Not a leaf but we have last_Java_frame setup as we want
  1791     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
  1792     restore_args(masm, total_c_args, c_arg, out_regs);
  1794 #ifdef ASSERT
  1795     { Label L;
  1796     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  1797     __ jcc(Assembler::equal, L);
  1798     __ stop("no pending exception allowed on exit from monitorenter");
  1799     __ bind(L);
  1801 #endif
  1802     __ jmp(lock_done);
  1804     // END Slow path lock
  1806     // BEGIN Slow path unlock
  1807     __ bind(slow_path_unlock);
  1809     // If we haven't already saved the native result we must save it now as xmm registers
  1810     // are still exposed.
  1812     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
  1813       save_native_result(masm, ret_type, stack_slots);
  1816     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
  1818     __ mov(c_rarg0, obj_reg);
  1819     __ mov(r12, rsp); // remember sp
  1820     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  1821     __ andptr(rsp, -16); // align stack as required by ABI
  1823     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
  1824     // NOTE that obj_reg == rbx currently
  1825     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
  1826     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
  1828     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
  1829     __ mov(rsp, r12); // restore sp
  1830     __ reinit_heapbase();
  1831 #ifdef ASSERT
  1833       Label L;
  1834       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
  1835       __ jcc(Assembler::equal, L);
  1836       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
  1837       __ bind(L);
  1839 #endif /* ASSERT */
  1841     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
  1843     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
  1844       restore_native_result(masm, ret_type, stack_slots);
  1846     __ jmp(unlock_done);
  1848     // END Slow path unlock
  1850   } // synchronized
  1852   // SLOW PATH Reguard the stack if needed
  1854   __ bind(reguard);
  1855   save_native_result(masm, ret_type, stack_slots);
  1856   __ mov(r12, rsp); // remember sp
  1857   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  1858   __ andptr(rsp, -16); // align stack as required by ABI
  1859   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  1860   __ mov(rsp, r12); // restore sp
  1861   __ reinit_heapbase();
  1862   restore_native_result(masm, ret_type, stack_slots);
  1863   // and continue
  1864   __ jmp(reguard_done);
  1868   __ flush();
  1870   nmethod *nm = nmethod::new_native_nmethod(method,
  1871                                             compile_id,
  1872                                             masm->code(),
  1873                                             vep_offset,
  1874                                             frame_complete,
  1875                                             stack_slots / VMRegImpl::slots_per_word,
  1876                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
  1877                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
  1878                                             oop_maps);
  1879   return nm;
  1883 #ifdef HAVE_DTRACE_H
  1884 // ---------------------------------------------------------------------------
  1885 // Generate a dtrace nmethod for a given signature.  The method takes arguments
  1886 // in the Java compiled code convention, marshals them to the native
  1887 // abi and then leaves nops at the position you would expect to call a native
  1888 // function. When the probe is enabled the nops are replaced with a trap
  1889 // instruction that dtrace inserts and the trace will cause a notification
  1890 // to dtrace.
  1891 //
  1892 // The probes are only able to take primitive types and java/lang/String as
  1893 // arguments.  No other java types are allowed. Strings are converted to utf8
  1894 // strings so that from dtrace's point of view java strings are converted to C
  1895 // strings. There is an arbitrary fixed limit on the total space that a method
  1896 // can use for converting the strings. (256 chars per string in the signature).
  1897 // So any java string larger than this is truncated.
  1899 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
  1900 static bool offsets_initialized = false;
  1903 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
  1904                                                 methodHandle method) {
  1907   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
  1908   // be single threaded in this method.
  1909   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
  1911   if (!offsets_initialized) {
  1912     fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
  1913     fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
  1914     fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
  1915     fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
  1916     fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
  1917     fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
  1919     fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
  1920     fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
  1921     fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
  1922     fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
  1923     fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
  1924     fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
  1925     fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
  1926     fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
  1928     offsets_initialized = true;
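         // fp_offset now maps each C argument register to a dedicated
         // rbp-relative spill slot (c_rarg0 at rbp - 8, c_rarg1 at rbp - 16,
         // ..., c_farg7 at rbp - 112 with 8-byte words), so a live incoming
         // value can always be parked in, and reloaded from, a fixed home while
         // the args are juggled below.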
  1930   // Fill in the signature array, for the calling-convention call.
  1931   int total_args_passed = method->size_of_parameters();
  1933   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  1934   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
  1936   // The signature we are going to use for the trap that dtrace will see:
  1937   // java/lang/String is converted, we drop "this", and any other object
  1938   // is converted to NULL.  (A one-slot java/lang/Long object reference
  1939   // is converted to a two-slot long, which is why we double the allocation).
  1940   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
  1941   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
  1943   int i=0;
  1944   int total_strings = 0;
  1945   int first_arg_to_pass = 0;
  1946   int total_c_args = 0;
  1948   // Skip the receiver as dtrace doesn't want to see it
  1949   if( !method->is_static() ) {
  1950     in_sig_bt[i++] = T_OBJECT;
  1951     first_arg_to_pass = 1;
  1954   // We need to convert the java args to where a native (non-jni) function
  1955   // would expect them. To figure out where they go we convert the java
  1956   // signature to a C signature.
  1958   SignatureStream ss(method->signature());
  1959   for ( ; !ss.at_return_type(); ss.next()) {
  1960     BasicType bt = ss.type();
  1961     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
  1962     out_sig_bt[total_c_args++] = bt;
  1963     if( bt == T_OBJECT) {
  1964       Symbol* s = ss.as_symbol_or_null();   // symbol is created
  1965       if (s == vmSymbols::java_lang_String()) {
  1966         total_strings++;
  1967         out_sig_bt[total_c_args-1] = T_ADDRESS;
  1968       } else if (s == vmSymbols::java_lang_Boolean() ||
  1969                  s == vmSymbols::java_lang_Character() ||
  1970                  s == vmSymbols::java_lang_Byte() ||
  1971                  s == vmSymbols::java_lang_Short() ||
  1972                  s == vmSymbols::java_lang_Integer() ||
  1973                  s == vmSymbols::java_lang_Float()) {
  1974         out_sig_bt[total_c_args-1] = T_INT;
  1975       } else if (s == vmSymbols::java_lang_Long() ||
  1976                  s == vmSymbols::java_lang_Double()) {
  1977         out_sig_bt[total_c_args-1] = T_LONG;
  1978         out_sig_bt[total_c_args++] = T_VOID;
  1980     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
  1981       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  1982       // We convert double to long
  1983       out_sig_bt[total_c_args-1] = T_LONG;
  1984       out_sig_bt[total_c_args++] = T_VOID;
  1985     } else if ( bt == T_FLOAT) {
  1986       // We convert float to int
  1987       out_sig_bt[total_c_args-1] = T_INT;
  1991   assert(i==total_args_passed, "validly parsed signature");
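         // For illustration only (hypothetical method): a static
         //   void m(String s, Integer i, double d)
         // parses to
         //   in_sig_bt  = { T_OBJECT, T_OBJECT, T_DOUBLE, T_VOID }
         //   out_sig_bt = { T_ADDRESS, T_INT, T_LONG, T_VOID }
         // i.e. the String becomes a utf8 pointer, the box is narrowed to an int,
         // and the double travels as a long.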
  1993   // Now get the compiled-Java layout as input arguments
  1994   int comp_args_on_stack;
  1995   comp_args_on_stack = SharedRuntime::java_calling_convention(
  1996       in_sig_bt, in_regs, total_args_passed, false);
  1998   // Now figure out where the args must be stored and how much stack space
  1999   // they require (neglecting out_preserve_stack_slots, but including space for storing
  2000   // the 1st six register arguments). It's weird; see int_stk_helper.
  2002   int out_arg_slots;
  2003   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
  2005   // Calculate the total number of stack slots we will need.
  2007   // First count the abi requirement plus all of the outgoing args
  2008   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
  2010   // Now space for the string(s) we must convert
  2011   int* string_locs   = NEW_RESOURCE_ARRAY(int, total_strings + 1);
  2012   for (i = 0; i < total_strings ; i++) {
  2013     string_locs[i] = stack_slots;
  2014     stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
  2017   // Plus the temps we might need to juggle register args
  2018   // regs take two slots each
  2019   stack_slots += (Argument::n_int_register_parameters_c +
  2020                   Argument::n_float_register_parameters_c) * 2;
  2023   // + 4 for return address (which we own) and saved rbp,
  2025   stack_slots += 4;
  2027   // OK, the space we have allocated will look like:
  2028   //
  2029   //
  2030   // FP-> |                     |
  2031   //      |---------------------|
  2032   //      | string[n]           |
  2033   //      |---------------------| <- string_locs[n]
  2034   //      | string[n-1]         |
  2035   //      |---------------------| <- string_locs[n-1]
  2036   //      | ...                 |
  2037   //      | ...                 |
  2038   //      |---------------------| <- string_locs[1]
  2039   //      | string[0]           |
  2040   //      |---------------------| <- string_locs[0]
  2041   //      | outbound memory     |
  2042   //      | based arguments     |
  2043   //      |                     |
  2044   //      |---------------------|
  2045   //      |                     |
  2046   // SP-> | out_preserved_slots |
  2047   //
  2048   //
  2050   // Now compute the actual number of stack words we need, rounding to keep
  2051   // the stack properly aligned.
  2052   stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
  2054   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
  2056   intptr_t start = (intptr_t)__ pc();
  2058   // First thing, make an IC check to see if we should even be here
  2060   // We are free to use all registers as temps without saving them and
  2061   // restoring them except rbp. rbp is the only callee save register
  2062   // as far as the interpreter and the compiler(s) are concerned.
  2064   const Register ic_reg = rax;
  2065   const Register receiver = rcx;
  2066   Label hit;
  2067   Label exception_pending;
  2070   __ verify_oop(receiver);
  2071   __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  2072   __ jcc(Assembler::equal, hit);
  2074   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  2076   // The verified entry must be aligned for code patching,
  2077   // and the first 5 bytes must be in the same cache line;
  2078   // if we align at 8 then we can be sure the 5 bytes are in the same line.
  2079   __ align(8);
  2081   __ bind(hit);
  2083   int vep_offset = ((intptr_t)__ pc()) - start;
  2086   // The instruction at the verified entry point must be 5 bytes or longer
  2087   // because it can be patched on the fly by make_non_entrant. The stack bang
  2088   // instruction fits that requirement.
  2090   // Generate stack overflow check
  2092   if (UseStackBanging) {
  2093     if (stack_size <= StackShadowPages*os::vm_page_size()) {
  2094       __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
  2095     } else {
  2096       __ movl(rax, stack_size);
  2097       __ bang_stack_size(rax, rbx);
  2099   } else {
  2100     // need a 5 byte instruction to allow MT safe patching to non-entrant
  2101     __ fat_nop();
  2104   assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
  2105          "valid size for make_non_entrant");
  2107   // Generate a new frame for the wrapper.
  2108   __ enter();
  2110   // -4 because return address is already present and so is saved rbp,
  2111   if (stack_size - 2*wordSize != 0) {
  2112     __ subq(rsp, stack_size - 2*wordSize);
  2115   // Frame is now completed as far as size and linkage.
  2117   int frame_complete = ((intptr_t)__ pc()) - start;
  2119   int c_arg, j_arg;
  2121   // State of input register args
  2123   bool  live[ConcreteRegisterImpl::number_of_registers];
  2125   live[j_rarg0->as_VMReg()->value()] = false;
  2126   live[j_rarg1->as_VMReg()->value()] = false;
  2127   live[j_rarg2->as_VMReg()->value()] = false;
  2128   live[j_rarg3->as_VMReg()->value()] = false;
  2129   live[j_rarg4->as_VMReg()->value()] = false;
  2130   live[j_rarg5->as_VMReg()->value()] = false;
  2132   live[j_farg0->as_VMReg()->value()] = false;
  2133   live[j_farg1->as_VMReg()->value()] = false;
  2134   live[j_farg2->as_VMReg()->value()] = false;
  2135   live[j_farg3->as_VMReg()->value()] = false;
  2136   live[j_farg4->as_VMReg()->value()] = false;
  2137   live[j_farg5->as_VMReg()->value()] = false;
  2138   live[j_farg6->as_VMReg()->value()] = false;
  2139   live[j_farg7->as_VMReg()->value()] = false;
  2142   bool rax_is_zero = false;
  2144   // All args (except strings) destined for the stack are moved first
  2145   for (j_arg = first_arg_to_pass, c_arg = 0 ;
  2146        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
  2147     VMRegPair src = in_regs[j_arg];
  2148     VMRegPair dst = out_regs[c_arg];
  2150     // Get the real reg value or a dummy (rsp)
  2152     int src_reg = src.first()->is_reg() ?
  2153                   src.first()->value() :
  2154                   rsp->as_VMReg()->value();
  2156     bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
  2157                     (in_sig_bt[j_arg] == T_OBJECT &&
  2158                      out_sig_bt[c_arg] != T_INT &&
  2159                      out_sig_bt[c_arg] != T_ADDRESS &&
  2160                      out_sig_bt[c_arg] != T_LONG);
  2162     live[src_reg] = !useless;
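           // "useless" covers the arg types the dtrace probe cannot take: arrays,
           // and object args other than String (-> T_ADDRESS) or the boxed
           // primitives narrowed to T_INT/T_LONG above.  Such args are not
           // tracked as live and are eventually converted to NULL.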
  2164     if (dst.first()->is_stack()) {
  2166       // Even though a string arg in a register is still live after this loop,
  2167       // after the string conversion loop (next) it will be dead, so we take
  2168       // advantage of that now for simpler liveness bookkeeping.
  2170       live[src_reg] = false;
  2171       switch (in_sig_bt[j_arg]) {
  2173         case T_ARRAY:
  2174         case T_OBJECT:
  2176             Address stack_dst(rsp, reg2offset_out(dst.first()));
  2178             if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
  2179               // need to unbox a one-word value
  2180               Register in_reg = rax;
  2181               if ( src.first()->is_reg() ) {
  2182                 in_reg = src.first()->as_Register();
  2183               } else {
  2184                 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
  2185                 rax_is_zero = false;
  2187               Label skipUnbox;
  2188               __ movptr(Address(rsp, reg2offset_out(dst.first())),
  2189                         (int32_t)NULL_WORD);
  2190               __ testq(in_reg, in_reg);
  2191               __ jcc(Assembler::zero, skipUnbox);
  2193               BasicType bt = out_sig_bt[c_arg];
  2194               int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
  2195               Address src1(in_reg, box_offset);
  2196               if ( bt == T_LONG ) {
  2197                 __ movq(in_reg,  src1);
  2198                 __ movq(stack_dst, in_reg);
  2199                 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
  2200                 ++c_arg; // skip over T_VOID to keep the loop indices in sync
  2201               } else {
  2202                 __ movl(in_reg,  src1);
  2203                 __ movl(stack_dst, in_reg);
  2206               __ bind(skipUnbox);
  2207             } else if (out_sig_bt[c_arg] != T_ADDRESS) {
  2208               // Convert the arg to NULL
  2209               if (!rax_is_zero) {
  2210                 __ xorq(rax, rax);
  2211                 rax_is_zero = true;
  2213               __ movq(stack_dst, rax);
  2216           break;
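               // Note on the unboxing above: value_offset_in_bytes(bt) is the
               // offset of the box's primitive payload (e.g. java.lang.Integer.value),
               // so a single movl/movq from (box + offset) recovers the int or
               // long value that the dtrace probe will actually see.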
  2218         case T_VOID:
  2219           break;
  2221         case T_FLOAT:
  2222           // This does the right thing since we know it is destined for the
  2223           // stack
  2224           float_move(masm, src, dst);
  2225           break;
  2227         case T_DOUBLE:
  2228           // This does the right thing since we know it is destined for the
  2229           // stack
  2230           double_move(masm, src, dst);
  2231           break;
  2233         case T_LONG :
  2234           long_move(masm, src, dst);
  2235           break;
  2237         case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
  2239         default:
  2240           move32_64(masm, src, dst);
  2246   // If we have any strings we must store any register based arg to the stack
  2247   // This includes any still live xmm registers too.
  2249   int sid = 0;
  2251   if (total_strings > 0 ) {
  2252     for (j_arg = first_arg_to_pass, c_arg = 0 ;
  2253          j_arg < total_args_passed ; j_arg++, c_arg++ ) {
  2254       VMRegPair src = in_regs[j_arg];
  2255       VMRegPair dst = out_regs[c_arg];
  2257       if (src.first()->is_reg()) {
  2258         Address src_tmp(rbp, fp_offset[src.first()->value()]);
  2260         // String oops were left untouched by the previous loop even if the
  2261         // eventual (converted) arg is destined for the stack, so park them
  2262         // away now (except for the first).
  2264         if (out_sig_bt[c_arg] == T_ADDRESS) {
  2265           Address utf8_addr = Address(
  2266               rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
  2267           if (sid != 1) {
  2268             // The first string arg won't be killed until after the utf8
  2269             // conversion
  2270             __ movq(utf8_addr, src.first()->as_Register());
  2272         } else if (dst.first()->is_reg()) {
  2273           if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
  2275             // Convert the xmm register to an int and store it in the reserved
  2276             // location for the eventual c register arg
  2277             XMMRegister f = src.first()->as_XMMRegister();
  2278             if (in_sig_bt[j_arg] == T_FLOAT) {
  2279               __ movflt(src_tmp, f);
  2280             } else {
  2281               __ movdbl(src_tmp, f);
  2283           } else {
  2284             // If the arg is an oop type we don't support, don't bother to store
  2285             // it; remember, strings were handled above.
  2286             bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
  2287                             (in_sig_bt[j_arg] == T_OBJECT &&
  2288                              out_sig_bt[c_arg] != T_INT &&
  2289                              out_sig_bt[c_arg] != T_LONG);
  2291             if (!useless) {
  2292               __ movq(src_tmp, src.first()->as_Register());
  2297       if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
  2298         assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
  2299         ++c_arg; // skip over T_VOID to keep the loop indices in sync
  2303     // Now that the volatile registers are safe, convert all the strings
  2304     sid = 0;
  2306     for (j_arg = first_arg_to_pass, c_arg = 0 ;
  2307          j_arg < total_args_passed ; j_arg++, c_arg++ ) {
  2308       if (out_sig_bt[c_arg] == T_ADDRESS) {
  2309         // It's a string
  2310         Address utf8_addr = Address(
  2311             rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
  2312         // The first string we find might still be in the original java arg
  2313         // register
  2315         VMReg src = in_regs[j_arg].first();
  2317         // We will need to eventually save the final argument to the trap
  2318         // in the non-volatile location dedicated to src. This is the offset
  2319         // from fp we will use.
  2320         int src_off = src->is_reg() ?
  2321             fp_offset[src->value()] : reg2offset_in(src);
  2323         // This is where the argument will eventually reside
  2324         VMRegPair dst = out_regs[c_arg];
  2326         if (src->is_reg()) {
  2327           if (sid == 1) {
  2328             __ movq(c_rarg0, src->as_Register());
  2329           } else {
  2330             __ movq(c_rarg0, utf8_addr);
  2332         } else {
  2333           // arg is still in the original location
  2334           __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
  2336         Label done, convert;
  2338         // see if the oop is NULL
  2339         __ testq(c_rarg0, c_rarg0);
  2340         __ jcc(Assembler::notEqual, convert);
  2342         if (dst.first()->is_reg()) {
  2343           // Save the ptr to the utf8 string in the original src loc or the tmp
  2344           // dedicated to it
  2345           __ movq(Address(rbp, src_off), c_rarg0);
  2346         } else {
  2347           __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
  2349         __ jmp(done);
  2351         __ bind(convert);
  2353         __ lea(c_rarg1, utf8_addr);
  2354         if (dst.first()->is_reg()) {
  2355           __ movq(Address(rbp, src_off), c_rarg1);
  2356         } else {
  2357           __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
  2359         // And do the conversion
  2360         __ call(RuntimeAddress(
  2361                 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
  2363         __ bind(done);
  2365       if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
  2366         assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
  2367         ++c_arg; // skip over T_VOID to keep the loop indices in sync
  2370     // The get_utf call killed all the c_arg registers
  2371     live[c_rarg0->as_VMReg()->value()] = false;
  2372     live[c_rarg1->as_VMReg()->value()] = false;
  2373     live[c_rarg2->as_VMReg()->value()] = false;
  2374     live[c_rarg3->as_VMReg()->value()] = false;
  2375     live[c_rarg4->as_VMReg()->value()] = false;
  2376     live[c_rarg5->as_VMReg()->value()] = false;
  2378     live[c_farg0->as_VMReg()->value()] = false;
  2379     live[c_farg1->as_VMReg()->value()] = false;
  2380     live[c_farg2->as_VMReg()->value()] = false;
  2381     live[c_farg3->as_VMReg()->value()] = false;
  2382     live[c_farg4->as_VMReg()->value()] = false;
  2383     live[c_farg5->as_VMReg()->value()] = false;
  2384     live[c_farg6->as_VMReg()->value()] = false;
  2385     live[c_farg7->as_VMReg()->value()] = false;
  2388   // Now we can finally move the register args to their desired locations
  2390   rax_is_zero = false;
  2392   for (j_arg = first_arg_to_pass, c_arg = 0 ;
  2393        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
  2395     VMRegPair src = in_regs[j_arg];
  2396     VMRegPair dst = out_regs[c_arg];
  2398     // Only need to look for args destined for the integer registers (since we
  2399     // convert float/double args to look like int/long outbound)
  2400     if (dst.first()->is_reg()) {
  2401       Register r =  dst.first()->as_Register();
  2403       // Check if the java arg is unsupported and therefore useless
  2404       bool useless =  in_sig_bt[j_arg] == T_ARRAY ||
  2405                       (in_sig_bt[j_arg] == T_OBJECT &&
  2406                        out_sig_bt[c_arg] != T_INT &&
  2407                        out_sig_bt[c_arg] != T_ADDRESS &&
  2408                        out_sig_bt[c_arg] != T_LONG);
  2411       // If we're going to kill an existing arg save it first
  2412       if (live[dst.first()->value()]) {
  2413         // you can't kill yourself
  2414         if (src.first() != dst.first()) {
  2415           __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
  2418       if (src.first()->is_reg()) {
  2419         if (live[src.first()->value()] ) {
  2420           if (in_sig_bt[j_arg] == T_FLOAT) {
  2421             __ movdl(r, src.first()->as_XMMRegister());
  2422           } else if (in_sig_bt[j_arg] == T_DOUBLE) {
  2423             __ movdq(r, src.first()->as_XMMRegister());
  2424           } else if (r != src.first()->as_Register()) {
  2425             if (!useless) {
  2426               __ movq(r, src.first()->as_Register());
  2429         } else {
  2430           // If the arg is an oop type we don't support, don't bother to store
  2431           // it
  2432           if (!useless) {
  2433             if (in_sig_bt[j_arg] == T_DOUBLE ||
  2434                 in_sig_bt[j_arg] == T_LONG  ||
  2435                 in_sig_bt[j_arg] == T_OBJECT ) {
  2436               __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
  2437             } else {
  2438               __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
  2442         live[src.first()->value()] = false;
  2443       } else if (!useless) {
  2444         // full sized move even for int should be ok
  2445         __ movq(r, Address(rbp, reg2offset_in(src.first())));
  2448       // At this point r has the original java arg in the final location
  2449       // (assuming it wasn't useless). If the java arg was an oop
  2450       // we have a bit more to do
  2452       if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
  2453         if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
  2454           // need to unbox a one-word value
  2455           Label skip;
  2456           __ testq(r, r);
  2457           __ jcc(Assembler::equal, skip);
  2458           BasicType bt = out_sig_bt[c_arg];
  2459           int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
  2460           Address src1(r, box_offset);
  2461           if ( bt == T_LONG ) {
  2462             __ movq(r, src1);
  2463           } else {
  2464             __ movl(r, src1);
  2466           __ bind(skip);
  2468         } else if (out_sig_bt[c_arg] != T_ADDRESS) {
  2469           // Convert the arg to NULL
  2470           __ xorq(r, r);
  2474       // dst can no longer be holding an input value
  2475       live[dst.first()->value()] = false;
  2477     if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
  2478       assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
  2479       ++c_arg; // skip over T_VOID to keep the loop indices in sync
  2484   // Ok now we are done. Need to place the nop that dtrace wants in order to
  2485   // patch in the trap
  2486   int patch_offset = ((intptr_t)__ pc()) - start;
  2488   __ nop();
  2491   // Return
  2493   __ leave();
  2494   __ ret(0);
  2496   __ flush();
  2498   nmethod *nm = nmethod::new_dtrace_nmethod(
  2499       method, masm->code(), vep_offset, patch_offset, frame_complete,
  2500       stack_slots / VMRegImpl::slots_per_word);
  2501   return nm;
  2505 #endif // HAVE_DTRACE_H
  2507 // This function returns the adjustment (in number of words) to a c2i adapter
  2508 // activation for use during deoptimization.
  2509 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
  2510   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  2514 uint SharedRuntime::out_preserve_stack_slots() {
  2515   return 0;
  2519 //------------------------------generate_deopt_blob----------------------------
  2520 void SharedRuntime::generate_deopt_blob() {
  2521   // Allocate space for the code
  2522   ResourceMark rm;
  2523   // Setup code generation tools
  2524   CodeBuffer buffer("deopt_blob", 2048, 1024);
  2525   MacroAssembler* masm = new MacroAssembler(&buffer);
  2526   int frame_size_in_words;
  2527   OopMap* map = NULL;
  2528   OopMapSet *oop_maps = new OopMapSet();
  2530   // -------------
  2531   // This code enters when returning to a de-optimized nmethod.  A return
  2532   // address has been pushed on the stack, and return values are in
  2533   // registers.
  2534   // If we are doing a normal deopt then we were called from the patched
  2535   // nmethod from the point we returned to the nmethod. So the return
  2536   // address on the stack is wrong by NativeCall::instruction_size
  2537   // We will adjust the value so it looks like we have the original return
  2538   // address on the stack (like when we eagerly deoptimized).
  2539   // In the case of an exception pending when deoptimizing, we enter
  2540   // with a return address on the stack that points after the call we patched
  2541   // into the exception handler. We have the following register state from,
  2542   // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
  2543   //    rax: exception oop
  2544   //    rbx: exception handler
  2545   //    rdx: throwing pc
  2546   // So in this case we simply jam rdx into the useless return address and
  2547   // the stack looks just like we want.
  2548   //
  2549   // At this point we need to de-opt.  We save the argument return
  2550   // registers.  We call the first C routine, fetch_unroll_info().  This
  2551   // routine captures the return values and returns a structure which
  2552   // describes the current frame size and the sizes of all replacement frames.
  2553   // The current frame is compiled code and may contain many inlined
  2554   // functions, each with their own JVM state.  We pop the current frame, then
  2555   // push all the new frames.  Then we call the C routine unpack_frames() to
  2556   // populate these frames.  Finally unpack_frames() returns us the new target
  2557   // address.  Notice that callee-save registers are BLOWN here; they have
  2558   // already been captured in the vframeArray at the time the return PC was
  2559   // patched.
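         // Roughly, the blob below has four entry points, generated in this order:
         //   start                    - normal deoptimization (Unpack_deopt)
         //   reexecute_offset         - re-execute the current bytecode (Unpack_reexecute)
         //   exception_offset         - exception oop in rax, throwing pc in rdx
         //   exception_in_tls_offset  - exception oop/pc already stored in the thread
         // All of them save the live registers and then meet at the common "cont" path.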
  2560   address start = __ pc();
  2561   Label cont;
  2563   // Prolog for non exception case!
  2565   // Save everything in sight.
  2566   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  2568   // Normal deoptimization.  Save exec mode for unpack_frames.
  2569   __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
  2570   __ jmp(cont);
  2572   int reexecute_offset = __ pc() - start;
  2574   // Reexecute case
  2575   // the return address is the pc that describes what bci to re-execute at
  2577   // No need to update map as each call to save_live_registers will produce identical oopmap
  2578   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  2580   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
  2581   __ jmp(cont);
  2583   int exception_offset = __ pc() - start;
  2585   // Prolog for exception case
  2587   // all registers are dead at this entry point, except for rax, and
  2588   // rdx which contain the exception oop and exception pc
  2589   // respectively.  Set them in TLS and fall thru to the
  2590   // unpack_with_exception_in_tls entry point.
  2592   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
  2593   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
  2595   int exception_in_tls_offset = __ pc() - start;
  2597   // new implementation because exception oop is now passed in JavaThread
  2599   // Prolog for exception case
  2600   // All registers must be preserved because they might be used by LinearScan
  2601   // Exception oop and throwing PC are passed in JavaThread
  2602   // tos: stack at point of call to method that threw the exception (i.e. only
  2603   // args are on the stack, no return address)
  2605   // make room on stack for the return address
  2606   // It will be patched later with the throwing pc. The correct value is not
  2607   // available now because loading it from memory would destroy registers.
  2608   __ push(0);
  2610   // Save everything in sight.
  2611   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  2613   // Now it is safe to overwrite any register
  2615   // Deopt during an exception.  Save exec mode for unpack_frames.
  2616   __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
  2618   // load throwing pc from JavaThread and patch it as the return address
  2619   // of the current frame. Then clear the field in JavaThread
  2621   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
  2622   __ movptr(Address(rbp, wordSize), rdx);
  2623   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
  2625 #ifdef ASSERT
  2626   // verify that there is really an exception oop in JavaThread
  2627   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  2628   __ verify_oop(rax);
  2630   // verify that there is no pending exception
  2631   Label no_pending_exception;
  2632   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
  2633   __ testptr(rax, rax);
  2634   __ jcc(Assembler::zero, no_pending_exception);
  2635   __ stop("must not have pending exception here");
  2636   __ bind(no_pending_exception);
  2637 #endif
  2639   __ bind(cont);
  2641   // Call C code.  Need thread and this frame, but NOT official VM entry
  2642   // crud.  We cannot block on this call, no GC can happen.
  2643   //
  2644   // UnrollBlock* fetch_unroll_info(JavaThread* thread)
  2646   // fetch_unroll_info needs to call last_java_frame().
  2648   __ set_last_Java_frame(noreg, noreg, NULL);
  2649 #ifdef ASSERT
  2650   { Label L;
  2651     __ cmpptr(Address(r15_thread,
  2652                     JavaThread::last_Java_fp_offset()),
  2653             (int32_t)0);
  2654     __ jcc(Assembler::equal, L);
  2655     __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
  2656     __ bind(L);
  2657   }
  2658 #endif // ASSERT
  2659   __ mov(c_rarg0, r15_thread);
  2660   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
  2662   // Need to have an oopmap that tells fetch_unroll_info where to
  2663   // find any register it might need.
  2664   oop_maps->add_gc_map(__ pc() - start, map);
  2666   __ reset_last_Java_frame(false, false);
  2668   // Load UnrollBlock* into rdi
  2669   __ mov(rdi, rax);
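         // fetch_unroll_info returned the UnrollBlock* in rax; keep it in rdi,
         // which is consulted below for the frame sizes, pcs and counts.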
  2671   Label noException;
  2672   __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
  2673   __ jcc(Assembler::notEqual, noException);
  2674   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  2675   // QQQ this is useless; it was set to NULL above
  2676   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
  2677   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
  2678   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
  2680   __ verify_oop(rax);
  2682   // Overwrite the result registers with the exception results.
  2683   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
  2684   // I think this is useless
  2685   __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
  2687   __ bind(noException);
  2689   // Only register save data is on the stack.
  2690   // Now restore the result registers.  Everything else is either dead
  2691   // or captured in the vframeArray.
  2692   RegisterSaver::restore_result_registers(masm);
  2694   // All of the register save area has been popped off the stack. Only the
  2695   // return address remains.
  2697   // Pop all the frames we must move/replace.
  2698   //
  2699   // Frame picture (youngest to oldest)
  2700   // 1: self-frame (no frame link)
  2701   // 2: deopting frame  (no frame link)
  2702   // 3: caller of deopting frame (could be compiled/interpreted).
  2703   //
  2704   // Note: by leaving the return address of self-frame on the stack
  2705   // and using the size of frame 2 to adjust the stack,
  2706   // the return to frame 3 will still be on the stack when we are done.
  2708   // Pop deoptimized frame
  2709   __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  2710   __ addptr(rsp, rcx);
  2712   // rsp should be pointing at the return address to the caller (3)
  2714   // Stack bang to make sure there's enough room for these interpreter frames.
  2715   if (UseStackBanging) {
  2716     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
  2717     __ bang_stack_size(rbx, rcx);
  2718   }
  2720   // Load address of array of frame pcs into rcx
  2721   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
  2723   // Trash the old pc
  2724   __ addptr(rsp, wordSize);
  2726   // Load address of array of frame sizes into rsi
  2727   __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
  2729   // Load counter into rdx
  2730   __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
  2732   // Pick up the initial fp we should save
  2733   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
  2735   // Now adjust the caller's stack to make up for the extra locals
  2736   // but record the original sp so that we can save it in the skeletal interpreter
  2737   // frame; the stack walking of interpreter_sender will then get the unextended sp
  2738   // value and not the "real" sp value.
  2740   const Register sender_sp = r8;
  2742   __ mov(sender_sp, rsp);
  2743   __ movl(rbx, Address(rdi,
  2744                        Deoptimization::UnrollBlock::
  2745                        caller_adjustment_offset_in_bytes()));
  2746   __ subptr(rsp, rbx);
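         // For the loop below: rsi walks the array of frame sizes, rcx walks the
         // array of frame pcs, rdx counts the frames still to be pushed, and r8
         // (sender_sp) carries the unextended sp into each skeletal frame.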
  2748   // Push interpreter frames in a loop
  2749   Label loop;
  2750   __ bind(loop);
  2751   __ movptr(rbx, Address(rsi, 0));      // Load frame size
  2752 #ifdef CC_INTERP
  2753   __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
  2754 #ifdef ASSERT
  2755   __ push(0xDEADDEAD);                  // Make a recognizable pattern
  2756   __ push(0xDEADDEAD);
  2757 #else /* ASSERT */
  2758   __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
  2759 #endif /* ASSERT */
  2760 #else
  2761   __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
  2762 #endif // CC_INTERP
  2763   __ pushptr(Address(rcx, 0));          // Save return address
  2764   __ enter();                           // Save old & set new ebp
  2765   __ subptr(rsp, rbx);                  // Prolog
  2766 #ifdef CC_INTERP
  2767   __ movptr(Address(rbp,
  2768                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
  2769             sender_sp); // Make it walkable
  2770 #else /* CC_INTERP */
  2771   // This value is corrected by layout_activation_impl
  2772   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
  2773   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
  2774 #endif /* CC_INTERP */
  2775   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
  2776   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  2777   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  2778   __ decrementl(rdx);                   // Decrement counter
  2779   __ jcc(Assembler::notZero, loop);
  2780   __ pushptr(Address(rcx, 0));          // Save final return address
  2782   // Re-push self-frame
  2783   __ enter();                           // Save old & set new ebp
  2785   // Allocate a full sized register save area.
  2786   // Return address and rbp are in place, so we allocate two fewer words.
  2787   __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
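         // frame_size_in_words is the value reported by save_live_registers above,
         // so the RegisterSaver offsets used below still line up with this frame.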
  2789   // Restore frame locals after moving the frame
  2790   __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
  2791   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
  2793   // Call C code.  Need thread but NOT official VM entry
  2794   // crud.  We cannot block on this call, no GC can happen.  Call should
  2795   // restore return values to their stack-slots with the new SP.
  2796   //
  2797   // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
  2799   // Use rbp because the frames look interpreted now
  2800   __ set_last_Java_frame(noreg, rbp, NULL);
  2802   __ mov(c_rarg0, r15_thread);
  2803   __ movl(c_rarg1, r14); // second arg: exec_mode
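         // r14 is callee-saved, so the mode chosen at the blob entry survived the
         // fetch_unroll_info call above.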
  2804   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  2806   // Set an oopmap for the call site
  2807   oop_maps->add_gc_map(__ pc() - start,
  2808                        new OopMap( frame_size_in_words, 0 ));
  2810   __ reset_last_Java_frame(true, false);
  2812   // Collect return values
  2813   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
  2814   __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
  2815   // I think this is useless (throwing pc?)
  2816   __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
  2818   // Pop self-frame.
  2819   __ leave();                           // Epilog
  2821   // Jump to interpreter
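         // The return address popped by the ret below is the final entry taken
         // from the frame_pcs array, which continues execution in the interpreter.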
  2822   __ ret(0);
  2824   // Make sure all code is generated
  2825   masm->flush();
  2827   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  2828   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
  2829 }
  2831 #ifdef COMPILER2
  2832 //------------------------------generate_uncommon_trap_blob--------------------
  2833 void SharedRuntime::generate_uncommon_trap_blob() {
  2834   // Allocate space for the code
  2835   ResourceMark rm;
  2836   // Setup code generation tools
  2837   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  2838   MacroAssembler* masm = new MacroAssembler(&buffer);
  2840   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
  2842   address start = __ pc();
  2844   // Push self-frame.  We get here with a return address on the
  2845   // stack, so rsp is 8-byte aligned until we allocate our frame.
  2846   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
  2848   // No callee saved registers. rbp is assumed implicitly saved
  2849   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
  2851   // The compiler left unloaded_class_index in j_rarg0; move it to where the
  2852   // runtime expects it.
  2853   __ movl(c_rarg1, j_rarg0);
  2855   __ set_last_Java_frame(noreg, noreg, NULL);
  2857   // Call C code.  Need thread but NOT official VM entry
  2858   // crud.  We cannot block on this call, no GC can happen.  Call should
  2859   // capture callee-saved registers as well as return values.
  2860   // Thread is in rdi already.
  2861   //
  2862   // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
  2864   __ mov(c_rarg0, r15_thread);
  2865   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
  2867   // Set an oopmap for the call site
  2868   OopMapSet* oop_maps = new OopMapSet();
  2869   OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
  2871   // location of rbp is known implicitly by the frame sender code
  2873   oop_maps->add_gc_map(__ pc() - start, map);
  2875   __ reset_last_Java_frame(false, false);
  2877   // Load UnrollBlock* into rdi
  2878   __ mov(rdi, rax);
  2880   // Pop all the frames we must move/replace.
  2881   //
  2882   // Frame picture (youngest to oldest)
  2883   // 1: self-frame (no frame link)
  2884   // 2: deopting frame  (no frame link)
  2885   // 3: caller of deopting frame (could be compiled/interpreted).
  2887   // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
  2888   __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
  2890   // Pop deoptimized frame (int)
  2891   __ movl(rcx, Address(rdi,
  2892                        Deoptimization::UnrollBlock::
  2893                        size_of_deoptimized_frame_offset_in_bytes()));
  2894   __ addptr(rsp, rcx);
  2896   // rsp should be pointing at the return address to the caller (3)
  2898   // Stack bang to make sure there's enough room for these interpreter frames.
  2899   if (UseStackBanging) {
  2900     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
  2901     __ bang_stack_size(rbx, rcx);
  2902   }
  2904   // Load address of array of frame pcs into rcx (address*)
  2905   __ movptr(rcx,
  2906             Address(rdi,
  2907                     Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
  2909   // Trash the return pc
  2910   __ addptr(rsp, wordSize);
  2912   // Load address of array of frame sizes into rsi (intptr_t*)
  2913   __ movptr(rsi, Address(rdi,
  2914                          Deoptimization::UnrollBlock::
  2915                          frame_sizes_offset_in_bytes()));
  2917   // Counter
  2918   __ movl(rdx, Address(rdi,
  2919                        Deoptimization::UnrollBlock::
  2920                        number_of_frames_offset_in_bytes())); // (int)
  2922   // Pick up the initial fp we should save
  2923   __ movptr(rbp,
  2924             Address(rdi,
  2925                     Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
  2927   // Now adjust the caller's stack to make up for the extra locals but
  2928   // record the original sp so that we can save it in the skeletal
  2929   // interpreter frame and the stack walking of interpreter_sender
  2930   // will get the unextended sp value and not the "real" sp value.
  2932   const Register sender_sp = r8;
  2934   __ mov(sender_sp, rsp);
  2935   __ movl(rbx, Address(rdi,
  2936                        Deoptimization::UnrollBlock::
  2937                        caller_adjustment_offset_in_bytes())); // (int)
  2938   __ subptr(rsp, rbx);
  2940   // Push interpreter frames in a loop
  2941   Label loop;
  2942   __ bind(loop);
  2943   __ movptr(rbx, Address(rsi, 0)); // Load frame size
  2944   __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
  2945   __ pushptr(Address(rcx, 0));     // Save return address
  2946   __ enter();                      // Save old & set new rbp
  2947   __ subptr(rsp, rbx);             // Prolog
  2948 #ifdef CC_INTERP
  2949   __ movptr(Address(rbp,
  2950                   -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
  2951             sender_sp); // Make it walkable
  2952 #else // CC_INTERP
  2953   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
  2954             sender_sp);            // Make it walkable
  2955   // This value is corrected by layout_activation_impl
  2956   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
  2957 #endif // CC_INTERP
  2958   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
  2959   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
  2960   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
  2961   __ decrementl(rdx);              // Decrement counter
  2962   __ jcc(Assembler::notZero, loop);
  2963   __ pushptr(Address(rcx, 0));     // Save final return address
  2965   // Re-push self-frame
  2966   __ enter();                 // Save old & set new rbp
  2967   __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
  2968                               // Prolog
  2970   // Use rbp because the frames look interpreted now
  2971   __ set_last_Java_frame(noreg, rbp, NULL);
  2973   // Call C code.  Need thread but NOT official VM entry
  2974   // crud.  We cannot block on this call, no GC can happen.  Call should
  2975   // restore return values to their stack-slots with the new SP.
  2976   // Thread is in rdi already.
  2977   //
  2978   // BasicType unpack_frames(JavaThread* thread, int exec_mode);
  2980   __ mov(c_rarg0, r15_thread);
  2981   __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
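         // Unlike the deopt blob, the exec_mode here is a compile-time constant
         // (always an uncommon trap), so no register needs to carry it across calls.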
  2982   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  2984   // Set an oopmap for the call site
  2985   oop_maps->add_gc_map(__ pc() - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
  2987   __ reset_last_Java_frame(true, false);
  2989   // Pop self-frame.
  2990   __ leave();                 // Epilog
  2992   // Jump to interpreter
  2993   __ ret(0);
  2995   // Make sure all code is generated
  2996   masm->flush();
  2998   _uncommon_trap_blob =  UncommonTrapBlob::create(&buffer, oop_maps,
  2999                                                  SimpleRuntimeFrame::framesize >> 1);
  3000 }
  3001 #endif // COMPILER2
  3004 //------------------------------generate_handler_blob------
  3005 //
  3006 // Generate a special Compile2Runtime blob that saves all registers
  3007 // and sets up an oopmap.
  3008 //
  3009 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
  3010   assert(StubRoutines::forward_exception_entry() != NULL,
  3011          "must be generated before");
  3013   ResourceMark rm;
  3014   OopMapSet *oop_maps = new OopMapSet();
  3015   OopMap* map;
  3017   // Allocate space for the code.  Setup code generation tools.
  3018   CodeBuffer buffer("handler_blob", 2048, 1024);
  3019   MacroAssembler* masm = new MacroAssembler(&buffer);
  3021   address start   = __ pc();
  3022   address call_pc = NULL;
  3023   int frame_size_in_words;
  3025   // Make room for return address (or push it again)
  3026   if (!cause_return) {
  3027     __ push(rbx);
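           // rbx is only a dummy word here; the real return address is patched in
           // below from JavaThread::saved_exception_pc once the frame is set up.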
  3028   }
  3030   // Save registers, fpu state, and flags
  3031   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  3033   // The following is basically a call_VM.  However, we need the precise
  3034   // address of the call in order to generate an oopmap. Hence, we do all the
  3035   // work ourselves.
  3037   __ set_last_Java_frame(noreg, noreg, NULL);
  3039   // The return address must always be correct so that the frame constructor never
  3040   // sees an invalid pc.
  3042   if (!cause_return) {
  3043     // overwrite the dummy value we pushed on entry
  3044     __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
  3045     __ movptr(Address(rbp, wordSize), c_rarg0);
  3046   }
  3048   // Do the call
  3049   __ mov(c_rarg0, r15_thread);
  3050   __ call(RuntimeAddress(call_ptr));
  3052   // Set an oopmap for the call site.  This oopmap will map all
  3053   // oop-registers and debug-info registers as callee-saved.  This
  3054   // will allow deoptimization at this safepoint to find all possible
  3055   // debug-info recordings, as well as let GC find all oops.
  3057   oop_maps->add_gc_map( __ pc() - start, map);
  3059   Label noException;
  3061   __ reset_last_Java_frame(false, false);
  3063   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  3064   __ jcc(Assembler::equal, noException);
  3066   // Exception pending
  3068   RegisterSaver::restore_live_registers(masm);
  3070   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  3072   // No exception case
  3073   __ bind(noException);
  3075   // Normal exit, restore registers and exit.
  3076   RegisterSaver::restore_live_registers(masm);
  3078   __ ret(0);
  3080   // Make sure all code is generated
  3081   masm->flush();
  3083   // Fill-out other meta info
  3084   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
  3085 }
  3087 //
  3088 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
  3089 //
  3090 // Generate a stub that calls into vm to find out the proper destination
  3091 // of a java call. All the argument registers are live at this point
  3092 // but since this is generic code we don't know what they are and the caller
  3093 // must do any gc of the args.
  3094 //
  3095 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  3096   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  3098   // allocate space for the code
  3099   ResourceMark rm;
  3101   CodeBuffer buffer(name, 1000, 512);
  3102   MacroAssembler* masm                = new MacroAssembler(&buffer);
  3104   int frame_size_in_words;
  3106   OopMapSet *oop_maps = new OopMapSet();
  3107   OopMap* map = NULL;
  3109   int start = __ offset();
  3111   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
  3113   int frame_complete = __ offset();
  3115   __ set_last_Java_frame(noreg, noreg, NULL);
  3117   __ mov(c_rarg0, r15_thread);
  3119   __ call(RuntimeAddress(destination));
  3122   // Set an oopmap for the call site.
  3123   // We need this not only for callee-saved registers, but also for volatile
  3124   // registers that the compiler might be keeping live across a safepoint.
  3126   oop_maps->add_gc_map( __ offset() - start, map);
  3128   // rax contains the address we are going to jump to assuming no exception got installed
  3130   // clear last_Java_sp
  3131   __ reset_last_Java_frame(false, false);
  3132   // check for pending exceptions
  3133   Label pending;
  3134   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  3135   __ jcc(Assembler::notEqual, pending);
  3137   // get the returned methodOop
  3138   __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
  3139   __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
  3141   __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
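         // rax (the resolved destination) is written into its own save slot so
         // that restore_live_registers below reloads it for the jmp.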
  3143   RegisterSaver::restore_live_registers(masm);
  3145   // We are back to the original state on entry and ready to go.
  3147   __ jmp(rax);
  3149   // Pending exception after the safepoint
  3151   __ bind(pending);
  3153   RegisterSaver::restore_live_registers(masm);
  3155   // exception pending => remove activation and forward to exception handler
  3157   __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
  3159   __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
  3160   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  3162   // -------------
  3163   // make sure all code is generated
  3164   masm->flush();
  3166   // return the  blob
  3167   // frame_size_words or bytes??
  3168   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
  3169 }
  3172 #ifdef COMPILER2
  3173 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
  3174 //
  3175 //------------------------------generate_exception_blob---------------------------
  3176 // Creates the exception blob at the end.
  3177 // Using the exception blob, this code is jumped to from a compiled method.
  3178 // (see emit_exception_handler in x86_64.ad file)
  3179 //
  3180 // Given an exception pc at a call we call into the runtime for the
  3181 // handler in this method. This handler might merely restore state
  3182 // (i.e. callee save registers), unwind the frame, and jump to the
  3183 // exception handler for the nmethod if there is no Java level handler
  3184 // for the nmethod.
  3185 //
  3186 // This code is entered with a jmp.
  3187 //
  3188 // Arguments:
  3189 //   rax: exception oop
  3190 //   rdx: exception pc
  3191 //
  3192 // Results:
  3193 //   rax: exception oop
  3194 //   rdx: exception pc in caller or ???
  3195 //   destination: exception handler of caller
  3196 //
  3197 // Note: the exception pc MUST be at a call (precise debug information)
  3198 //       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
  3199 //
  3201 void OptoRuntime::generate_exception_blob() {
  3202   assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
  3203   assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
  3204   assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
  3206   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
  3208   // Allocate space for the code
  3209   ResourceMark rm;
  3210   // Setup code generation tools
  3211   CodeBuffer buffer("exception_blob", 2048, 1024);
  3212   MacroAssembler* masm = new MacroAssembler(&buffer);
  3215   address start = __ pc();
  3217   // Exception pc is 'return address' for stack walker
  3218   __ push(rdx);
  3219   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
  3221   // Save callee-saved registers.  See x86_64.ad.
  3223   // rbp is an implicitly saved callee saved register (i.e. the calling
  3224   // convention will save/restore it in prolog/epilog). Other than that
  3225   // there are no callee save registers now that adapter frames are gone.
  3227   __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
  3229   // Store exception in Thread object. We cannot pass any arguments to the
  3230   // handle_exception call, since we do not want to make any assumption
  3231   // about the size of the frame where the exception happened in.
  3232   // c_rarg0 is either rdi (Linux) or rcx (Windows).
  3233   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
  3234   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
  3236   // This call does all the hard work.  It checks if an exception handler
  3237   // exists in the method.
  3238   // If so, it returns the handler address.
  3239   // If not, it prepares for stack-unwinding, restoring the callee-save
  3240   // registers of the frame being removed.
  3241   //
  3242   // address OptoRuntime::handle_exception_C(JavaThread* thread)
  3244   __ set_last_Java_frame(noreg, noreg, NULL);
  3245   __ mov(c_rarg0, r15_thread);
  3246   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  3248   // Set an oopmap for the call site.  This oopmap will only be used if we
  3249   // are unwinding the stack.  Hence, all locations will be dead.
  3250   // Callee-saved registers will be the same as the frame above (i.e.,
  3251   // handle_exception_stub), since they were restored when we got the
  3252   // exception.
  3254   OopMapSet* oop_maps = new OopMapSet();
  3256   oop_maps->add_gc_map( __ pc()-start, new OopMap(SimpleRuntimeFrame::framesize, 0));
  3258   __ reset_last_Java_frame(false, false);
  3260   // Restore callee-saved registers
  3262   // rbp is an implicitly saved callee saved register (i.e. the calling
  3263   // convention will save/restore it in prolog/epilog). Other than that
  3264   // there are no callee save registers now that adapter frames are gone.
  3266   __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
  3268   __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
  3269   __ pop(rdx);                  // No need for exception pc anymore
  3271   // rax: exception handler
  3273   // Restore SP from BP if the exception PC is a MethodHandle call site.
  3274   __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
  3275   __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
  3277   // We have a handler in rax (could be deopt blob).
  3278   __ mov(r8, rax);
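         // Move the handler address out of rax, since rax is about to be reloaded
         // with the exception oop that the handler expects.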
  3280   // Get the exception oop
  3281   __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  3282   // Get the exception pc in case we are deoptimized
  3283   __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
  3284 #ifdef ASSERT
  3285   __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
  3286   __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
  3287 #endif
  3288   // Clear the exception oop so GC no longer processes it as a root.
  3289   __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
  3291   // rax: exception oop
  3292   // r8:  exception handler
  3293   // rdx: exception pc
  3294   // Jump to handler
  3296   __ jmp(r8);
  3298   // Make sure all code is generated
  3299   masm->flush();
  3301   // Set exception blob
  3302   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
  3303 }
  3304 #endif // COMPILER2
