src/cpu/sparc/vm/sharedRuntime_sparc.cpp

author: kvn
date:   Thu, 21 Jul 2011 11:25:07 -0700
changeset: 3037:3d42f82cd811

7063628: Use cbcond on T4
Summary: Add new short branch instruction to Hotspot sparc assembler.
Reviewed-by: never, twisti, jrose
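
Note: the new short branch surfaces in this file as the br_null_short macro
used in gen_i2c_adapter below. A minimal sketch of the before/after pattern,
using only calls that appear in this file:

    // classic branch: the delay slot must be filled explicitly
    __ br_null(G3_scratch, false, Assembler::pt, L);
    __ delayed()->nop();

    // T4 cbcond-backed short branch: no delay slot
    __ br_null_short(L0, Assembler::pt, loop);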

/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers, which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
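  //
  // Illustrative sketch of the problem (an interpretive note, not a build
  // guarantee): in a 32-bit process a 64-bit value live in %o0 becomes %i0
  // after a 'save', and a window spill may then store only the low 32 bits,
  // losing the upper half.  Hence the stx/ldx dance through the thread-local
  // o_reg_temps area in the code below.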
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };

  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64 bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }

  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1.
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
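
// Illustrative sketch (an example, not a platform guarantee): a VMReg with
// reg2stack() == 0 maps to byte offset out_preserve_stack_slots() * 4 from
// the window top, i.e. the first slot past whatever the ABI reserves (the
// 16-word register window area described below).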

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or it is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (i.e.,
// regs[].first() == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()), nor
// unrelated values in the same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.
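
// Illustrative sketch (64-bit build, incoming args): for the Java signature
// (int, long, double), sig_bt is {T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID}
// and java_calling_convention below would yield roughly
//   regs[0] = I0            (set1: 32-bit int, second() is Bad)
//   regs[1] = I1 pair       (set2: 64-bit long)
//   regs[3] = F0:F1 pair    (set2: aligned double)
// with regs[2] and regs[4] set_bad() as placeholders for the halves.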

// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  Values are
// packed in the registers.  There is no backing varargs store for values in
// registers.  In the 32-bit build, longs are passed in G1 and G4 (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack.  Then pack the first 8 float args
  // into F0-F7, extras spill to the stack.  Then pad all register sets to
  // align.  Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args.  See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt   = 0;
  int flt_reg_cnt   = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                            stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef _LP64
        if (int_reg < int_reg_max) {
          Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
          regs[i].set2(r->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else
#ifdef COMPILER2
        // For 32-bit build, can't pass longs in O-regs because they become
        // I-regs and get trashed.  Use G-regs instead.  G1 and G4 are almost
        // spare and available.  This convention isn't used by the Sparc ABI or
        // anywhere else. If we're tiered then we don't use G-regs because c1
        // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
        // G0: zero
        // G1: 1st Long arg
        // G2: global allocated to TLS
        // G3: used in inline cache check
        // G4: 2nd Long arg
        // G5: used in inline cache check
        // G6: used by OS
        // G7: used by OS

        if (g_reg == G1) {
          regs[i].set2(G1->as_VMReg()); // This long arg in G1
          g_reg = G4;                  // Where the next arg goes
        } else if (g_reg == G4) {
          regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
          g_reg = noreg;               // No more longs in registers
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#else // COMPILER2
        if (int_reg_pairs + 1 < int_reg_max) {
          if (is_outgoing) {
            regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
          } else {
            regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
          }
          int_reg_pairs += 2;
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
          stk_reg_pairs += 2;
        }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(    VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;
}

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};

// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
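
// Illustrative note (consistent with the LP64 comment in gen_c2i_adapter
// below): SPARC load/store displacements are 13-bit signed immediates, so a
// biased offset such as extraspace+STACK_BIAS can overflow the field;
// ensure_simm13_or_reg then materializes the constant into Rdisp (set via
// set_Rdisp to G3_scratch or G1_scratch) and returns the register form
// instead of the constant.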

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACK_BIAS we can run
  // out of bits in the displacement to do loads and stores.  Use g3 as
  // temporary displacement.
  if (! __ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }

  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
#endif // _LP64
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args

  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, floats |   |---Outgoing stack args, packed low.
  // +--------------+   |   First few args in registers.
  // :   doubles    :   |
  // |   longs      |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SETUP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {

    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }

    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle.  Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
    if (g3_crushed) {
      // Rats, the load was wasted; at least it is in cache...
      __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
    }
#endif /* _LP64 */

    // 6243940 We might end up in handle_wrong_method if
    // the callee is deoptimized as we race thru here. If that
    // happens we don't want to take a safepoint because the
    // caller frame will look interpreted and arguments are now
    // "compiled" so it is much better to make this transition
    // invisible to the stack walking code. Unfortunately if
    // we try to find the callee by normal means a safepoint
    // is possible. So we stash the desired callee in the thread
    // and the VM will find it there should this case occur.
    Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
    __ st_ptr(G5_method, callee_target_addr);

    if (StressNonEntrant) {
      // Open a big window for deopt failure
      __ save_frame(0);
      __ mov(G0, L0);
      Label loop;
      __ bind(loop);
      __ sub(L0, 1, L0);
      __ br_null_short(L0, Assembler::pt, loop);
      __ restore();
    }

    __ jmpl(G3, 0, G0);
    __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
#if !defined(_LP64) && defined(COMPILER2)
    Register R_temp   = L0;   // another scratch register
#else
    Register R_temp   = G1;   // another scratch register
#endif

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ verify_oop(G5_method);
    __ load_klass(O0, G3_scratch);
    __ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)
    __ save(SP, -frame::register_save_words*wordSize, SP);
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
    __ restore();
#else
    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
    __ verify_oop(R_temp);
    __ cmp(G3_scratch, R_temp);
#endif

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();
  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
  1220 // Helper function for native calling conventions
  1221 static VMReg int_stk_helper( int i ) {
  1222   // Bias any stack based VMReg we get by ignoring the window area
  1223   // but not the register parameter save area.
  1224   //
  1225   // This is strange for the following reasons. We'd normally expect
  1226   // the calling convention to return a VMReg for a stack slot
  1227   // completely ignoring any abi reserved area. C2 thinks of that
  1228   // abi area as only out_preserve_stack_slots. This does not include
  1229   // the area allocated by the C abi to store down integer arguments
  1230   // because the java calling convention does not use it. So
  1231   // since c2 assumes that there are only out_preserve_stack_slots
  1232   // to bias the optoregs (which impacts VMRegs), when referencing any
  1233   // actual stack location the c calling convention must add in this bias
  1234   // amount to make up for the fact that out_preserve_stack_slots is
  1235   // insufficient for C calls. What a mess. I sure hope those 6
  1236   // stack words were worth it on every java call!
  1238   // Another way of cleaning this up would be for out_preserve_stack_slots
  1239   // to take a parameter to say whether it was C or java calling conventions.
  1240   // Then things might look a little better (but not much).
  1242   int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  1243   if( mem_parm_offset < 0 ) {
  1244     return as_oRegister(i)->as_VMReg();
  1245   } else {
  1246     int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
  1247     // Now return a biased offset that will be correct when out_preserve_slots is added back in
  1248     return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  1249   }
  1250 }
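// A worked example of the bias arithmetic above (a sketch only; the concrete
// values of frame::memory_parameter_word_sp_offset, slots_per_word and
// out_preserve_stack_slots() come from the frame layout headers):
//
//   int_stk_helper(3) --> %o3 as a VMReg            (3 - 6 < 0: register case)
//   int_stk_helper(7) --> stack2reg((1 + frame::memory_parameter_word_sp_offset)
//                                    * VMRegImpl::slots_per_word
//                                    - SharedRuntime::out_preserve_stack_slots())
//
// i.e. the eighth argument lands one memory-parameter word past the window
// area, and the subtraction is undone when out_preserve_stack_slots is added
// back by the code that forms the final SP-relative offset.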
  1253 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
  1254                                          VMRegPair *regs,
  1255                                          int total_args_passed) {
  1257     // Return the number of VMReg stack_slots needed for the args.
  1258     // This value does not include an abi space (like register window
  1259     // save area).
  1261     // The native convention is V8 if !LP64
  1262     // The LP64 convention is the V9 convention which is slightly more sane.
  1264     // We return the amount of VMReg stack slots we need to reserve for all
  1265     // the arguments NOT counting out_preserve_stack_slots. Since we always
  1266     // have space for storing at least 6 registers to memory we start with that.
  1267     // See int_stk_helper for a further discussion.
  1268     int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
  1270 #ifdef _LP64
  1271     // V9 convention: All things "as-if" on double-wide stack slots.
  1272     // Hoist any int/ptr/long's in the first 6 to int regs.
  1273     // Hoist any flt/dbl's in the first 16 dbl regs.
  1274     int j = 0;                  // Count of actual args, not HALVES
  1275     for( int i=0; i<total_args_passed; i++, j++ ) {
  1276       switch( sig_bt[i] ) {
  1277       case T_BOOLEAN:
  1278       case T_BYTE:
  1279       case T_CHAR:
  1280       case T_INT:
  1281       case T_SHORT:
  1282         regs[i].set1( int_stk_helper( j ) ); break;
  1283       case T_LONG:
  1284         assert( sig_bt[i+1] == T_VOID, "expecting half" );
  1285       case T_ADDRESS: // raw pointers, like current thread, for VM calls
  1286       case T_ARRAY:
  1287       case T_OBJECT:
  1288         regs[i].set2( int_stk_helper( j ) );
  1289         break;
  1290       case T_FLOAT:
  1291         if ( j < 16 ) {
  1292           // V9ism: floats go in ODD registers
  1293           regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
  1294         } else {
  1295           // V9ism: floats go in ODD stack slot
  1296           regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
  1297         }
  1298         break;
  1299       case T_DOUBLE:
  1300         assert( sig_bt[i+1] == T_VOID, "expecting half" );
  1301         if ( j < 16 ) {
  1302           // V9ism: doubles go in EVEN/ODD regs
  1303           regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
  1304         } else {
  1305           // V9ism: doubles go in EVEN/ODD stack slots
  1306           regs[i].set2(VMRegImpl::stack2reg(j<<1));
  1307         }
  1308         break;
  1309       case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
  1310       default:
  1311         ShouldNotReachHere();
  1312       }
  1313       if (regs[i].first()->is_stack()) {
  1314         int off =  regs[i].first()->reg2stack();
  1315         if (off > max_stack_slots) max_stack_slots = off;
  1316       }
  1317       if (regs[i].second()->is_stack()) {
  1318         int off =  regs[i].second()->reg2stack();
  1319         if (off > max_stack_slots) max_stack_slots = off;
  1320       }
  1321     }
  1323 #else // _LP64
  1324     // V8 convention: first 6 things in O-regs, rest on stack.
  1325     // Alignment is willy-nilly.
  1326     for( int i=0; i<total_args_passed; i++ ) {
  1327       switch( sig_bt[i] ) {
  1328       case T_ADDRESS: // raw pointers, like current thread, for VM calls
  1329       case T_ARRAY:
  1330       case T_BOOLEAN:
  1331       case T_BYTE:
  1332       case T_CHAR:
  1333       case T_FLOAT:
  1334       case T_INT:
  1335       case T_OBJECT:
  1336       case T_SHORT:
  1337         regs[i].set1( int_stk_helper( i ) );
  1338         break;
  1339       case T_DOUBLE:
  1340       case T_LONG:
  1341         assert( sig_bt[i+1] == T_VOID, "expecting half" );
  1342         regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
  1343         break;
  1344       case T_VOID: regs[i].set_bad(); break;
  1345       default:
  1346         ShouldNotReachHere();
  1347       }
  1348       if (regs[i].first()->is_stack()) {
  1349         int off =  regs[i].first()->reg2stack();
  1350         if (off > max_stack_slots) max_stack_slots = off;
  1351       }
  1352       if (regs[i].second()->is_stack()) {
  1353         int off =  regs[i].second()->reg2stack();
  1354         if (off > max_stack_slots) max_stack_slots = off;
  1355       }
  1356     }
  1357 #endif // _LP64
  1359   return round_to(max_stack_slots + 1, 2);
  1360 }
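// A minimal usage sketch using only names from this file: computing the
// outgoing-arg slot count for a hypothetical native signature
// (JavaThread*, jint). The helper name is made up for illustration.
static int example_c_convention_slots() {
  BasicType sig[] = { T_ADDRESS, T_INT };   // hypothetical two-arg signature
  VMRegPair regs[2];
  // On LP64 both arguments are hoisted into %o0/%o1, so the result is just
  // the 6-word register-store floor rounded up to an even slot count.
  return SharedRuntime::c_calling_convention(sig, regs, 2);
}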
  1364 // ---------------------------------------------------------------------------
  1365 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  1366   switch (ret_type) {
  1367   case T_FLOAT:
  1368     __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
  1369     break;
  1370   case T_DOUBLE:
  1371     __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
  1372     break;
  1373   }
  1374 }
  1376 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  1377   switch (ret_type) {
  1378   case T_FLOAT:
  1379     __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
  1380     break;
  1381   case T_DOUBLE:
  1382     __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
  1383     break;
  1384   }
  1385 }
  1387 // Check and forward any pending exception.  Thread is stored in
  1388 // L7_thread_cache and possibly NOT in G2_thread.  Since this is a
  1389 // native call, there is no exception handler.  We merely pop this
  1390 // frame off and throw the exception in the caller's frame.
  1391 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  1392   Label L;
  1393   __ br_null(Rex_oop, false, Assembler::pt, L);
  1394   __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  1395   // Since this is a native call, we *know* the proper exception handler
  1396   // without calling into the VM: it's the empty function.  Just pop this
  1397   // frame and then jump to forward_exception_entry; O7 will contain the
  1398   // native caller's return PC.
  1399   AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  1400   __ jump_to(exception_entry, G3_scratch);
  1401   __ delayed()->restore();      // Pop this frame off.
  1402   __ bind(L);
  1403 }
  1405 // A simple move of integer like type
  1406 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1407   if (src.first()->is_stack()) {
  1408     if (dst.first()->is_stack()) {
  1409       // stack to stack
  1410       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
  1411       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  1412     } else {
  1413       // stack to reg
  1414       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1415     }
  1416   } else if (dst.first()->is_stack()) {
  1417     // reg to stack
  1418     __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1419   } else {
  1420     __ mov(src.first()->as_Register(), dst.first()->as_Register());
  1421   }
  1422 }
  1424 // On 64 bit we will store integer like items to the stack as
  1425 // 64-bit items (sparc abi) even though java would only store
  1426 // 32 bits for a parameter.  On 32 bit it will simply be 32 bits.
  1427 // So this routine does 32->32 on 32 bit and 32->64 on 64 bit.
  1428 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1429   if (src.first()->is_stack()) {
  1430     if (dst.first()->is_stack()) {
  1431       // stack to stack
  1432       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
  1433       __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  1434     } else {
  1435       // stack to reg
  1436       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1437     }
  1438   } else if (dst.first()->is_stack()) {
  1439     // reg to stack
  1440     __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1441   } else {
  1442     __ mov(src.first()->as_Register(), dst.first()->as_Register());
  1443   }
  1444 }
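// For example, for an int argument on LP64 the caller wrote a 32-bit java
// slot but the sparc ABI wants a full 64-bit stack word, so move32_64 uses
// st_ptr (an 8-byte stx) where simple_move32 uses st (a 4-byte store); on a
// 32-bit VM the two routines emit identical code.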
  1447 // An oop arg. Must pass a handle not the oop itself
  1448 static void object_move(MacroAssembler* masm,
  1449                         OopMap* map,
  1450                         int oop_handle_offset,
  1451                         int framesize_in_slots,
  1452                         VMRegPair src,
  1453                         VMRegPair dst,
  1454                         bool is_receiver,
  1455                         int* receiver_offset) {
  1457   // must pass a handle. First figure out the location we use as a handle
  1459   if (src.first()->is_stack()) {
  1460     // Oop is already on the stack
  1461     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
  1462     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
  1463     __ ld_ptr(rHandle, 0, L4);
  1464 #ifdef _LP64
  1465     __ movr( Assembler::rc_z, L4, G0, rHandle );
  1466 #else
  1467     __ tst( L4 );
  1468     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
  1469 #endif
  1470     if (dst.first()->is_stack()) {
  1471       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
  1472     }
  1473     int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
  1474     if (is_receiver) {
  1475       *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
  1476     }
  1477     map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  1478   } else {
  1479     // Oop is in an input register; we must flush it to the stack
  1480     const Register rOop = src.first()->as_Register();
  1481     const Register rHandle = L5;
  1482     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
  1483     int offset = oop_slot*VMRegImpl::stack_slot_size;
  1484     Label skip;
  1485     __ st_ptr(rOop, SP, offset + STACK_BIAS);
  1486     if (is_receiver) {
  1487       *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
  1488     }
  1489     map->set_oop(VMRegImpl::stack2reg(oop_slot));
  1490     __ add(SP, offset + STACK_BIAS, rHandle);
  1491 #ifdef _LP64
  1492     __ movr( Assembler::rc_z, rOop, G0, rHandle );
  1493 #else
  1494     __ tst( rOop );
  1495     __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
  1496 #endif
  1498     if (dst.first()->is_stack()) {
  1499       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
  1500     } else {
  1501       __ mov(rHandle, dst.first()->as_Register());
  1502     }
  1503   }
  1504 }
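// A sketch of the register case above for a receiver oop (hypothetical
// numbers: receiver inbound in %i0, outbound in %o0, oop_handle_offset 64,
// so oop_slot = 0*2 + 64 and offset = 64*4 = 256 with LP64 slot sizes):
//
//   st_ptr %i0, [SP+256+STACK_BIAS]   ! flush the oop where GC can update it
//   add    SP, 256+STACK_BIAS, %o0    ! the handle is the slot's address
//   movr   rc_z, %i0, %g0, %o0        ! a NULL oop must become a NULL handle
//
// and the oopMap records stack slot 64 instead of the register.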
  1506 // A float arg may have to do float reg to int reg conversion
  1507 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1508   assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
  1510   if (src.first()->is_stack()) {
  1511     if (dst.first()->is_stack()) {
  1512       // stack to stack the easiest of the bunch
  1513       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
  1514       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  1515     } else {
  1516       // stack to reg
  1517       if (dst.first()->is_Register()) {
  1518         __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1519       } else {
  1520         __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
  1521       }
  1522     }
  1523   } else if (dst.first()->is_stack()) {
  1524     // reg to stack
  1525     if (src.first()->is_Register()) {
  1526       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1527     } else {
  1528       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1529     }
  1530   } else {
  1531     // reg to reg
  1532     if (src.first()->is_Register()) {
  1533       if (dst.first()->is_Register()) {
  1534         // gpr -> gpr
  1535         __ mov(src.first()->as_Register(), dst.first()->as_Register());
  1536       } else {
  1537         // gpr -> fpr
  1538         __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
  1539         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
  1541     } else if (dst.first()->is_Register()) {
  1542       // fpr -> gpr
  1543       __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
  1544       __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
  1545     } else {
  1546       // fpr -> fpr
  1547       // In theory these overlap but the ordering is such that this is likely a nop
  1548       if ( src.first() != dst.first()) {
  1549         __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
  1550       }
  1551     }
  1552   }
  1553 }
  1555 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1556   VMRegPair src_lo(src.first());
  1557   VMRegPair src_hi(src.second());
  1558   VMRegPair dst_lo(dst.first());
  1559   VMRegPair dst_hi(dst.second());
  1560   simple_move32(masm, src_lo, dst_lo);
  1561   simple_move32(masm, src_hi, dst_hi);
  1562 }
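// e.g. on V8 a long held in an %o-register pair and headed for two stack
// slots becomes two independent 32-bit moves, one per half; no pairing
// constraint survives once the halves are handled separately.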
  1564 // A long move
  1565 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1567   // Do the simple ones here else do two int moves
  1568   if (src.is_single_phys_reg() ) {
  1569     if (dst.is_single_phys_reg()) {
  1570       __ mov(src.first()->as_Register(), dst.first()->as_Register());
  1571     } else {
  1572       // split src into two separate registers
  1573       // Remember hi means hi address or lsw on sparc
  1574       // Move msw to lsw
  1575       if (dst.second()->is_reg()) {
  1576         // MSW -> MSW
  1577         __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
  1578         // Now LSW -> LSW
  1579         // this will only move lo -> lo and ignore hi
  1580         VMRegPair split(dst.second());
  1581         simple_move32(masm, src, split);
  1582       } else {
  1583         VMRegPair split(src.first(), L4->as_VMReg());
  1584         // MSW -> MSW (lo ie. first word)
  1585         __ srax(src.first()->as_Register(), 32, L4);
  1586         split_long_move(masm, split, dst);
  1587       }
  1588     }
  1589   } else if (dst.is_single_phys_reg()) {
  1590     if (src.is_adjacent_aligned_on_stack(2)) {
  1591       __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1592     } else {
  1593       // dst is a single reg.
  1594       // Remember lo is low address not msb for stack slots
  1595       // and lo is the "real" register for registers
  1596       // src is split; each half may live in a reg or a stack slot
  1598       VMRegPair split;
  1600       if (src.first()->is_reg()) {
  1601         // src.lo (msw) is a reg, src.hi is stk/reg
  1602         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
  1603         split.set_pair(dst.first(), src.first());
  1604       } else {
  1605         // msw is stack move to L5
  1606         // lsw is stack move to dst.lo (real reg)
  1607         // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
  1608         split.set_pair(dst.first(), L5->as_VMReg());
  1609       }
  1611       // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
  1612       // msw   -> src.lo/L5,  lsw -> dst.lo
  1613       split_long_move(masm, src, split);
  1615       // dst.lo now holds the lsw in its correct position; shift the
  1616       // msw half up and merge it in
  1617       __ sllx(split.first()->as_Register(), 32, L5);
  1619       const Register d = dst.first()->as_Register();
  1620       __ or3(L5, d, d);
  1621     }
  1622   } else {
  1623     // For LP64 we can probably do better.
  1624     split_long_move(masm, src, dst);
  1625   }
  1626 }
  1628 // A double move
  1629 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  1631   // The painful thing here is that like long_move a VMRegPair might be
  1632   // 1: a single physical register
  1633   // 2: two physical registers (v8)
  1634   // 3: a physical reg [lo] and a stack slot [hi] (v8)
  1635   // 4: two stack slots
  1637   // Since src is always a java calling convention we know that the src pair
  1638   // is always either all registers or all stack (and aligned?); it is never
  1640   // split with a register [lo] and a stack slot [hi]
  1641   if (src.first()->is_stack()) {
  1642     if (dst.first()->is_stack()) {
  1643       // stack to stack the easiest of the bunch
  1644       // ought to be a way to do this where if alignment is ok we use ldd/std when possible
  1645       __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
  1646       __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
  1647       __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  1648       __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
  1649     } else {
  1650       // stack to reg
  1651       if (dst.second()->is_stack()) {
  1652         // stack -> reg, stack -> stack
  1653         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
  1654         if (dst.first()->is_Register()) {
  1655           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1656         } else {
  1657           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
  1658         }
  1659         // This was missing. (very rare case)
  1660         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
  1661       } else {
  1662         // stack -> reg
  1663         // Eventually optimize for alignment QQQ
  1664         if (dst.first()->is_Register()) {
  1665           __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
  1666           __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
  1667         } else {
  1668           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
  1669           __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
  1670         }
  1671       }
  1672     }
  1673   } else if (dst.first()->is_stack()) {
  1674     // reg to stack
  1675     if (src.first()->is_Register()) {
  1676       // Eventually optimize for alignment QQQ
  1677       __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1678       if (src.second()->is_stack()) {
  1679         __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
  1680         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
  1681       } else {
  1682         __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
  1683       }
  1684     } else {
  1685       // fpr to stack
  1686       if (src.second()->is_stack()) {
  1687         ShouldNotReachHere();
  1688       } else {
  1689         // Is the stack aligned?
  1690         if (reg2offset(dst.first()) & 0x7) {
  1691           // Not aligned; do as pairs
  1692           __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1693           __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
  1694         } else {
  1695           __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
  1696         }
  1697       }
  1698     }
  1699   } else {
  1700     // reg to reg
  1701     if (src.first()->is_Register()) {
  1702       if (dst.first()->is_Register()) {
  1703         // gpr -> gpr
  1704         __ mov(src.first()->as_Register(), dst.first()->as_Register());
  1705         __ mov(src.second()->as_Register(), dst.second()->as_Register());
  1706       } else {
  1707         // gpr -> fpr
  1708         // ought to be able to do a single store
  1709         __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
  1710         __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
  1711         // ought to be able to do a single load
  1712         __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
  1713         __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
  1714       }
  1715     } else if (dst.first()->is_Register()) {
  1716       // fpr -> gpr
  1717       // ought to be able to do a single store
  1718       __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
  1719       // ought to be able to do a single load
  1720       // REMEMBER first() is low address not LSB
  1721       __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
  1722       if (dst.second()->is_Register()) {
  1723         __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
  1724       } else {
  1725         __ ld(FP, -4 + STACK_BIAS, L4);
  1726         __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
  1727       }
  1728     } else {
  1729       // fpr -> fpr
  1730       // In theory these overlap but the ordering is such that this is likely a nop
  1731       if ( src.first() != dst.first()) {
  1732         __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
  1733       }
  1734     }
  1735   }
  1736 }
  1738 // Creates an inner frame if one hasn't already been created, and
  1739 // saves a copy of the thread in L7_thread_cache
  1740 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
  1741   if (!*already_created) {
  1742     __ save_frame(0);
  1743     // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
  1744     // Don't use save_thread because it smashes G2 and we merely want to save a
  1745     // copy
  1746     __ mov(G2_thread, L7_thread_cache);
  1747     *already_created = true;
  1748   }
  1749 }
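// A small usage sketch: repeated calls build at most one frame.
//
//   bool created = false;
//   create_inner_frame(masm, &created);  // emits save_frame(0) + thread copy
//   create_inner_frame(masm, &created);  // emits nothing the second time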
  1751 // ---------------------------------------------------------------------------
  1752 // Generate a native wrapper for a given method.  The method takes arguments
  1753 // in the Java compiled code convention, marshals them to the native
  1754 // convention (handlizes oops, etc), transitions to native, makes the call,
  1755 // returns to java state (possibly blocking), unhandlizes any result and
  1756 // returns.
  1757 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  1758                                                 methodHandle method,
  1759                                                 int compile_id,
  1760                                                 int total_in_args,
  1761                                                 int comp_args_on_stack, // in VMRegStackSlots
  1762                                                 BasicType *in_sig_bt,
  1763                                                 VMRegPair *in_regs,
  1764                                                 BasicType ret_type) {
  1766   // Native nmethod wrappers never take possession of the oop arguments.
  1767   // So the caller will gc the arguments. The only thing we need an
  1768   // oopMap for is if the call is static
  1769   //
  1770   // An OopMap for lock (and class if static), and one for the VM call itself
  1771   OopMapSet *oop_maps = new OopMapSet();
  1772   intptr_t start = (intptr_t)__ pc();
  1774   // First thing make an ic check to see if we should even be here
  1775   {
  1776     Label L;
  1777     const Register temp_reg = G3_scratch;
  1778     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
  1779     __ verify_oop(O0);
  1780     __ load_klass(O0, temp_reg);
  1781     __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
  1783     __ jump_to(ic_miss, temp_reg);
  1784     __ delayed()->nop();
  1785     __ align(CodeEntryAlignment);
  1786     __ bind(L);
  1787   }
  1789   int vep_offset = ((intptr_t)__ pc()) - start;
  1791 #ifdef COMPILER1
  1792   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
  1793     // Object.hashCode can pull the hashCode from the header word
  1794     // instead of doing a full VM transition once it's been computed.
  1795     // Since hashCode is usually polymorphic at call sites we can't do
  1796     // this optimization at the call site without a lot of work.
  1797     Label slowCase;
  1798     Register receiver             = O0;
  1799     Register result               = O0;
  1800     Register header               = G3_scratch;
  1801     Register hash                 = G3_scratch; // overwrite header value with hash value
  1802     Register mask                 = G1;         // to get hash field from header
  1804     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
  1805     // We depend on hash_mask being at most 32 bits and avoid the use of
  1806     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
  1807     // vm: see markOop.hpp.
  1808     __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
  1809     __ sethi(markOopDesc::hash_mask, mask);
  1810     __ btst(markOopDesc::unlocked_value, header);
  1811     __ br(Assembler::zero, false, Assembler::pn, slowCase);
  1812     if (UseBiasedLocking) {
  1813       // Check if biased and fall through to runtime if so
  1814       __ delayed()->nop();
  1815       __ btst(markOopDesc::biased_lock_bit_in_place, header);
  1816       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
  1817     }
  1818     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
  1820     // Check for a valid (non-zero) hash code and get its value.
  1821 #ifdef _LP64
  1822     __ srlx(header, markOopDesc::hash_shift, hash);
  1823 #else
  1824     __ srl(header, markOopDesc::hash_shift, hash);
  1825 #endif
  1826     __ andcc(hash, mask, hash);
  1827     __ br(Assembler::equal, false, Assembler::pn, slowCase);
  1828     __ delayed()->nop();
  1830     // leaf return.
  1831     __ retl();
  1832     __ delayed()->mov(hash, result);
  1833     __ bind(slowCase);
  1834   }
  1835 #endif // COMPILER1
  1838   // We have received a description of where all the java args are located
  1839   // on entry to the wrapper. We need to convert these args to where
  1840   // the jni function will expect them. To figure out where they go
  1841   // we convert the java signature to a C signature by inserting
  1842   // the hidden arguments as arg[0] and possibly arg[1] (static method)
  1844   int total_c_args = total_in_args + 1;
  1845   if (method->is_static()) {
  1846     total_c_args++;
  1847   }
  1849   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  1850   VMRegPair  * out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
  1852   int argc = 0;
  1853   out_sig_bt[argc++] = T_ADDRESS;
  1854   if (method->is_static()) {
  1855     out_sig_bt[argc++] = T_OBJECT;
  1856   }
  1858   for (int i = 0; i < total_in_args ; i++ ) {
  1859     out_sig_bt[argc++] = in_sig_bt[i];
  1860   }
  1862   // Now figure out where the args must be stored and how much stack space
  1863   // they require (neglecting out_preserve_stack_slots but including space
  1864   // for storing the 1st six register arguments). It's weird; see int_stk_helper.
  1865   //
  1866   int out_arg_slots;
  1867   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
  1869   // Compute framesize for the wrapper.  We need to handlize all oops in
  1870   // registers. We must create space for them here that is disjoint from
  1871   // the windowed save area because we have no control over when we might
  1872   // flush the window again and overwrite values that gc has since modified.
  1873   // (The live window race)
  1874   //
  1875   // We always just allocate 6 words for storing down these objects. This
  1876   // allows us to simply record the base and use the Ireg number to decide
  1877   // which slot to use. (Note that the reg number is the inbound number not
  1878   // the outbound number).
  1879   // We must shuffle args to match the native convention, and include var-args space.
  1881   // Calculate the total number of stack slots we will need.
  1883   // First count the abi requirement plus all of the outgoing args
  1884   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
  1886   // Now the space for the inbound oop handle area
  1888   int oop_handle_offset = stack_slots;
  1889   stack_slots += 6*VMRegImpl::slots_per_word;
  1891   // Now any space we need for handlizing a klass if static method
  1893   int oop_temp_slot_offset = 0;
  1894   int klass_slot_offset = 0;
  1895   int klass_offset = -1;
  1896   int lock_slot_offset = 0;
  1897   bool is_static = false;
  1899   if (method->is_static()) {
  1900     klass_slot_offset = stack_slots;
  1901     stack_slots += VMRegImpl::slots_per_word;
  1902     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
  1903     is_static = true;
  1904   }
  1906   // Plus a lock if needed
  1908   if (method->is_synchronized()) {
  1909     lock_slot_offset = stack_slots;
  1910     stack_slots += VMRegImpl::slots_per_word;
  1911   }
  1913   // Now a place to save return value or as a temporary for any gpr -> fpr moves
  1914   stack_slots += 2;
  1916   // Ok The space we have allocated will look like:
  1917   //
  1918   //
  1919   // FP-> |                     |
  1920   //      |---------------------|
  1921   //      | 2 slots for moves   |
  1922   //      |---------------------|
  1923   //      | lock box (if sync)  |
  1924   //      |---------------------| <- lock_slot_offset
  1925   //      | klass (if static)   |
  1926   //      |---------------------| <- klass_slot_offset
  1927   //      | oopHandle area      |
  1928   //      |---------------------| <- oop_handle_offset
  1929   //      | outbound memory     |
  1930   //      | based arguments     |
  1931   //      |                     |
  1932   //      |---------------------|
  1933   //      | vararg area         |
  1934   //      |---------------------|
  1935   //      |                     |
  1936   // SP-> | out_preserved_slots |
  1937   //
  1938   //
  1941   // Now compute actual number of stack words we need rounding to make
  1942   // stack properly aligned.
  1943   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
  1945   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
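// Illustrative arithmetic (all numbers hypothetical; only the summation
// pattern mirrors the code above). For a static synchronized method, assuming
// out_preserve_stack_slots() == 16 and out_arg_slots == 12 on LP64:
//
//   stack_slots = 16 + 12       // abi + outgoing args
//               + 6*2           // oop handle area
//               + 2             // klass slot (static)
//               + 2             // lock slot (synchronized)
//               + 2             // return-value / gpr->fpr move temp
//               = 40            // round_to(40, 4) == 40
//   stack_size  = 40 * 4 = 160 bytes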
  1947   // Generate stack overflow check before creating frame
  1948   __ generate_stack_overflow_check(stack_size);
  1950   // Generate a new frame for the wrapper.
  1951   __ save(SP, -stack_size, SP);
  1953   int frame_complete = ((intptr_t)__ pc()) - start;
  1955   __ verify_thread();
  1958   //
  1959   // We immediately shuffle the arguments so that any vm call we have to
  1960   // make from here on out (sync slow path, jvmti, etc.) we will have
  1961   // captured the oops from our caller and have a valid oopMap for
  1962   // them.
  1964   // -----------------
  1965   // The Grand Shuffle
  1966   //
  1967   // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  1968   // (derived from JavaThread* which is in L7_thread_cache) and, if static,
  1969   // the class mirror instead of a receiver.  This pretty much guarantees that
  1970   // register layout will not match.  We ignore these extra arguments during
  1971   // the shuffle. The shuffle is described by the two calling convention
  1972   // vectors we have in our possession. We simply walk the java vector to
  1973   // get the source locations and the c vector to get the destinations.
  1974   // Because we have a new window and the argument registers are completely
  1975   // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
  1976   // here.
  1978   // This is a trick. We double the stack slots so we can claim
  1979   // the oops in the caller's frame. Since we are sure to have
  1980   // more args than the caller, doubling is enough to make
  1981   // sure we can capture all the incoming oop args from the
  1982   // caller.
  1983   //
  1984   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  1985   int c_arg = total_c_args - 1;
  1986   // Record sp-based slot for receiver on stack for non-static methods
  1987   int receiver_offset = -1;
  1989   // We move the arguments backward because a floating point register
  1990   // destination will always be a register with a greater or equal register
  1991   // number, or a stack slot.
  1993 #ifdef ASSERT
  1994   bool reg_destroyed[RegisterImpl::number_of_registers];
  1995   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  1996   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
  1997     reg_destroyed[r] = false;
  1998   }
  1999   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
  2000     freg_destroyed[f] = false;
  2001   }
  2003 #endif /* ASSERT */
  2005   for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
  2007 #ifdef ASSERT
  2008     if (in_regs[i].first()->is_Register()) {
  2009       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
  2010     } else if (in_regs[i].first()->is_FloatRegister()) {
  2011       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
  2012     }
  2013     if (out_regs[c_arg].first()->is_Register()) {
  2014       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
  2015     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
  2016       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
  2017     }
  2018 #endif /* ASSERT */
  2020     switch (in_sig_bt[i]) {
  2021       case T_ARRAY:
  2022       case T_OBJECT:
  2023         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
  2024                     ((i == 0) && (!is_static)),
  2025                     &receiver_offset);
  2026         break;
  2027       case T_VOID:
  2028         break;
  2030       case T_FLOAT:
  2031         float_move(masm, in_regs[i], out_regs[c_arg]);
  2032           break;
  2034       case T_DOUBLE:
  2035         assert( i + 1 < total_in_args &&
  2036                 in_sig_bt[i + 1] == T_VOID &&
  2037                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
  2038         double_move(masm, in_regs[i], out_regs[c_arg]);
  2039         break;
  2041       case T_LONG :
  2042         long_move(masm, in_regs[i], out_regs[c_arg]);
  2043         break;
  2045       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
  2047       default:
  2048         move32_64(masm, in_regs[i], out_regs[c_arg]);
  2049     }
  2050   }
  2052   // Pre-load a static method's oop into O1.  Used both by locking code and
  2053   // the normal JNI call code.
  2054   if (method->is_static()) {
  2055     __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
  2057     // Now handlize the static class mirror in O1.  It's known not-null.
  2058     __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
  2059     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
  2060     __ add(SP, klass_offset + STACK_BIAS, O1);
  2061   }
  2064   const Register L6_handle = L6;
  2066   if (method->is_synchronized()) {
  2067     __ mov(O1, L6_handle);
  2068   }
  2070   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
  2071   // except O6/O7. So if we must call out we must push a new frame. We immediately
  2072   // push a new frame and flush the windows.
  2074 #ifdef _LP64
  2075   intptr_t thepc = (intptr_t) __ pc();
  2076   {
  2077     address here = __ pc();
  2078     // Call the next instruction
  2079     __ call(here + 8, relocInfo::none);
  2080     __ delayed()->nop();
  2081   }
  2082 #else
  2083   intptr_t thepc = __ load_pc_address(O7, 0);
  2084 #endif /* _LP64 */
  2086   // We use the same pc/oopMap repeatedly when we call out
  2087   oop_maps->add_gc_map(thepc - start, map);
  2089   // O7 now has the pc loaded that we will use when we finally call to native.
  2091   // Save thread in L7; it crosses a bunch of VM calls below
  2092   // Don't use save_thread because it smashes G2 and we merely
  2093   // want to save a copy
  2094   __ mov(G2_thread, L7_thread_cache);
  2097   // If we create an inner frame, once is plenty;
  2098   // when we create it we must also save G2_thread
  2099   bool inner_frame_created = false;
  2101   // dtrace method entry support
  2102   {
  2103     SkipIfEqual skip_if(
  2104       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
  2105     // create inner frame
  2106     __ save_frame(0);
  2107     __ mov(G2_thread, L7_thread_cache);
  2108     __ set_oop_constant(JNIHandles::make_local(method()), O1);
  2109     __ call_VM_leaf(L7_thread_cache,
  2110          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
  2111          G2_thread, O1);
  2112     __ restore();
  2113   }
  2115   // RedefineClasses() tracing support for obsolete method entry
  2116   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
  2117     // create inner frame
  2118     __ save_frame(0);
  2119     __ mov(G2_thread, L7_thread_cache);
  2120     __ set_oop_constant(JNIHandles::make_local(method()), O1);
  2121     __ call_VM_leaf(L7_thread_cache,
  2122          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
  2123          G2_thread, O1);
  2124     __ restore();
  2125   }
  2127   // We are in the jni frame unless inner_frame_created is true, in which
  2128   // case we are one frame deeper (the "inner" frame). If we are in the
  2129   // "inner" frame the args are in the Iregs; in the jni frame they are
  2130   // in the Oregs.
  2131   // If we ever need to go to the VM (for locking, jvmti) then
  2132   // we will always be in the "inner" frame.
  2134   // Lock a synchronized method
  2135   int lock_offset = -1;         // Set if locked
  2136   if (method->is_synchronized()) {
  2137     Register Roop = O1;
  2138     const Register L3_box = L3;
  2140     create_inner_frame(masm, &inner_frame_created);
  2142     __ ld_ptr(I1, 0, O1);
  2143     Label done;
  2145     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
  2146     __ add(FP, lock_offset+STACK_BIAS, L3_box);
  2147 #ifdef ASSERT
  2148     if (UseBiasedLocking) {
  2149       // making the box point to itself will make it clear it went unused
  2150       // but also be obviously invalid
  2151       __ st_ptr(L3_box, L3_box, 0);
  2152     }
  2153 #endif // ASSERT
  2154     //
  2155     // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
  2156     //
  2157     __ compiler_lock_object(Roop, L1,    L3_box, L2);
  2158     __ br(Assembler::equal, false, Assembler::pt, done);
  2159     __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
  2162     // None of the above fast optimizations worked so we have to get into the
  2163     // slow case of monitor enter.  Inline a special case of call_VM that
  2164     // disallows any pending_exception.
  2165     __ mov(Roop, O0);            // Need oop in O0
  2166     __ mov(L3_box, O1);
  2168     // Record last_Java_sp, in case the VM code releases the JVM lock.
  2170     __ set_last_Java_frame(FP, I7);
  2172     // do the call
  2173     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
  2174     __ delayed()->mov(L7_thread_cache, O2);
  2176     __ restore_thread(L7_thread_cache); // restore G2_thread
  2177     __ reset_last_Java_frame();
  2179 #ifdef ASSERT
  2180     { Label L;
  2181     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
  2182     __ br_null_short(O0, Assembler::pt, L);
  2183     __ stop("no pending exception allowed on exit from IR::monitorenter");
  2184     __ bind(L);
  2185     }
  2186 #endif
  2187     __ bind(done);
  2188   }
  2191   // Finally just about ready to make the JNI call
  2193   __ flush_windows();
  2194   if (inner_frame_created) {
  2195     __ restore();
  2196   } else {
  2197     // Store only what we need from this frame
  2198     // QQQ I think that non-v9 (like we care) we don't need these saves
  2199     // either as the flush traps and the current window goes too.
  2200     __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  2201     __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
  2202   }
  2204   // get JNIEnv* which is first argument to native
  2206   __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  2208   // Use that pc we placed in O7 a while back as the current frame anchor
  2210   __ set_last_Java_frame(SP, O7);
  2212   // Transition from _thread_in_Java to _thread_in_native.
  2213   __ set(_thread_in_native, G3_scratch);
  2214   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  2216   // We flushed the windows ages ago; now mark them as flushed
  2219   __ set(JavaFrameAnchor::flushed, G3_scratch);
  2221   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  2223 #ifdef _LP64
  2224   AddressLiteral dest(method->native_function());
  2225   __ relocate(relocInfo::runtime_call_type);
  2226   __ jumpl_to(dest, O7, O7);
  2227 #else
  2228   __ call(method->native_function(), relocInfo::runtime_call_type);
  2229 #endif
  2230   __ delayed()->st(G3_scratch, flags);
  2232   __ restore_thread(L7_thread_cache); // restore G2_thread
  2234   // Unpack native results.  For int-types, we do any needed sign-extension
  2235   // and move things into I0.  The return value there will survive any VM
  2236   // calls for blocking or unlocking.  An FP or OOP result (handle) is done
  2237   // specially in the slow-path code.
  2238   switch (ret_type) {
  2239   case T_VOID:    break;        // Nothing to do!
  2240   case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
  2241   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
  2242   // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1
  2243   case T_LONG:
  2244 #ifndef _LP64
  2245                   __ mov(O1, I1);
  2246 #endif
  2247                   // Fall thru
  2248   case T_OBJECT:                // Really a handle
  2249   case T_ARRAY:
  2250   case T_INT:
  2251                   __ mov(O0, I0);
  2252                   break;
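  // The T_BOOLEAN case below normalizes any non-zero result to exactly 1
  // without a branch: subcc(G0, O0, G0) computes 0 - O0 and sets the carry
  // (borrow) flag precisely when O0 != 0, then addc(G0, 0, I0) materializes
  // that flag as 0 or 1 in I0.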
  2253   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
  2254   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
  2255   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
  2256   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
  2257                                 // Cannot de-handlize until after reclaiming jvm_lock
  2258   default:
  2259     ShouldNotReachHere();
  2260   }
  2262   // must we block?
  2264   // Block, if necessary, before resuming in _thread_in_Java state.
  2265   // In order for GC to work, don't clear the last_Java_sp until after blocking.
  2266   { Label no_block;
  2267     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
  2269     // Switch thread to "native transition" state before reading the synchronization state.
  2270     // This additional state is necessary because reading and testing the synchronization
  2271     // state is not atomic w.r.t. GC, as this scenario demonstrates:
  2272     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  2273     //     VM thread changes sync state to synchronizing and suspends threads for GC.
  2274     //     Thread A is resumed to finish this native method, but doesn't block here since it
  2275     //     didn't see any synchronization in progress, and escapes.
  2276     __ set(_thread_in_native_trans, G3_scratch);
  2277     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  2278     if(os::is_MP()) {
  2279       if (UseMembar) {
  2280         // Force this write out before the read below
  2281         __ membar(Assembler::StoreLoad);
  2282       } else {
  2283         // Write serialization page so VM thread can do a pseudo remote membar.
  2284         // We use the current thread pointer to calculate a thread specific
  2285         // offset to write to within the page. This minimizes bus traffic
  2286         // due to cache line collision.
  2287         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
  2288       }
  2289     }
  2290     __ load_contents(sync_state, G3_scratch);
  2291     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
  2293     Label L;
  2294     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
  2295     __ br(Assembler::notEqual, false, Assembler::pn, L);
  2296     __ delayed()->ld(suspend_state, G3_scratch);
  2297     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
  2298     __ bind(L);
  2300     // Block.  Save any potential method result value before the operation and
  2301     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
  2302     // lets us share the oopMap we used when we went native rather than create
  2303     // a distinct one for this pc
  2304     //
  2305     save_native_result(masm, ret_type, stack_slots);
  2306     __ call_VM_leaf(L7_thread_cache,
  2307                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
  2308                     G2_thread);
  2310     // Restore any method result value
  2311     restore_native_result(masm, ret_type, stack_slots);
  2312     __ bind(no_block);
  2313   }
  2315   // thread state is thread_in_native_trans. Any safepoint blocking has already
  2316   // happened so we can now change state to _thread_in_Java.
  2319   __ set(_thread_in_Java, G3_scratch);
  2320   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
  2323   Label no_reguard;
  2324   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
  2325   __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
  2327   save_native_result(masm, ret_type, stack_slots);
  2328   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  2329   __ delayed()->nop();
  2331   __ restore_thread(L7_thread_cache); // restore G2_thread
  2332   restore_native_result(masm, ret_type, stack_slots);
  2334   __ bind(no_reguard);
  2336   // Handle possible exception (will unlock if necessary)
  2338   // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
  2340   // Unlock
  2341   if (method->is_synchronized()) {
  2342     Label done;
  2343     Register I2_ex_oop = I2;
  2344     const Register L3_box = L3;
  2345     // Get locked oop from the handle we passed to jni
  2346     __ ld_ptr(L6_handle, 0, L4);
  2347     __ add(SP, lock_offset+STACK_BIAS, L3_box);
  2348     // Must save pending exception around the slow-path VM call.  Since it's a
  2349     // leaf call, the pending exception (if any) can be kept in a register.
  2350     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
  2351     // Now unlock
  2352     //                       (Roop, Rmark, Rbox,   Rscratch)
  2353     __ compiler_unlock_object(L4,   L1,    L3_box, L2);
  2354     __ br(Assembler::equal, false, Assembler::pt, done);
  2355     __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
  2357     // save and restore any potential method result value around the unlocking
  2358     // operation.  Will save in I0 (or stack for FP returns).
  2359     save_native_result(masm, ret_type, stack_slots);
  2361     // Must clear pending-exception before re-entering the VM.  Since this is
  2362     // a leaf call, pending-exception-oop can be safely kept in a register.
  2363     __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
  2365     // slow case of monitor exit.  Inline a special case of call_VM that
  2366     // disallows any pending_exception.
  2367     __ mov(L3_box, O1);
  2369     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
  2370     __ delayed()->mov(L4, O0);              // Need oop in O0
  2372     __ restore_thread(L7_thread_cache); // restore G2_thread
  2374 #ifdef ASSERT
  2375     { Label L;
  2376     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
  2377     __ br_null_short(O0, Assembler::pt, L);
  2378     __ stop("no pending exception allowed on exit from IR::monitorexit");
  2379     __ bind(L);
  2380     }
  2381 #endif
  2382     restore_native_result(masm, ret_type, stack_slots);
  2383     // check_forward_pending_exception jumps to forward_exception if any pending
  2384     // exception is set.  The forward_exception routine expects to see the
  2385     // exception in pending_exception and not in a register.  Kind of clumsy,
  2386     // since all folks who branch to forward_exception must have tested
  2387     // pending_exception first and hence have it in a register already.
  2388     __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
  2389     __ bind(done);
  2390   }
  2392   // Tell dtrace about this method exit
  2393   {
  2394     SkipIfEqual skip_if(
  2395       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
  2396     save_native_result(masm, ret_type, stack_slots);
  2397     __ set_oop_constant(JNIHandles::make_local(method()), O1);
  2398     __ call_VM_leaf(L7_thread_cache,
  2399        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
  2400        G2_thread, O1);
  2401     restore_native_result(masm, ret_type, stack_slots);
  2402   }
  2404   // Clear "last Java frame" SP and PC.
  2405   __ verify_thread(); // G2_thread must be correct
  2406   __ reset_last_Java_frame();
  2408   // Unpack oop result
  2409   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
  2410       Label L;
  2411       __ addcc(G0, I0, G0);
  2412       __ brx(Assembler::notZero, true, Assembler::pt, L);
  2413       __ delayed()->ld_ptr(I0, 0, I0);
  2414       __ mov(G0, I0);
  2415       __ bind(L);
  2416       __ verify_oop(I0);
  2417   }
  2419   // reset handle block
  2420   __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
  2421   __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
  2423   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
  2424   check_forward_pending_exception(masm, G3_scratch);
  2427   // Return
  2429 #ifndef _LP64
  2430   if (ret_type == T_LONG) {
  2432     // Must leave proper result in O0,O1 and G1 (c2/tiered only)
  2433     __ sllx(I0, 32, G1);          // Shift bits into high G1
  2434     __ srl (I1, 0, I1);           // Zero extend I1 (harmless?)
  2435     __ or3 (I1, G1, G1);          // OR 64 bits into G1
  2436   }
  2437 #endif
  2439   __ ret();
  2440   __ delayed()->restore();
  2442   __ flush();
  2444   nmethod *nm = nmethod::new_native_nmethod(method,
  2445                                             compile_id,
  2446                                             masm->code(),
  2447                                             vep_offset,
  2448                                             frame_complete,
  2449                                             stack_slots / VMRegImpl::slots_per_word,
  2450                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
  2451                                             in_ByteSize(lock_offset),
  2452                                             oop_maps);
  2453   return nm;
  2454 }
  2457 #ifdef HAVE_DTRACE_H
  2458 // ---------------------------------------------------------------------------
  2459 // Generate a dtrace nmethod for a given signature.  The method takes arguments
  2460 // in the Java compiled code convention, marshals them to the native
  2461 // abi and then leaves nops at the position you would expect to call a native
  2462 // function. When the probe is enabled the nops are replaced with a trap
  2463 // instruction that dtrace inserts and the trace will cause a notification
  2464 // to dtrace.
  2465 //
  2466 // The probes are only able to take primitive types and java/lang/String as
  2467 // arguments.  No other java types are allowed. Strings are converted to utf8
  2468 // strings so that from dtrace point of view java strings are converted to C
  2469 // strings. There is an arbitrary fixed limit on the total space that a method
  2470 // can use for converting the strings. (256 chars per string in the signature).
  2471 // So any java string larger than this is truncated.
  2473 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
  2474 static bool offsets_initialized = false;
  2476 static VMRegPair reg64_to_VMRegPair(Register r) {
  2477   VMRegPair ret;
  2478   if (wordSize == 8) {
  2479     ret.set2(r->as_VMReg());
  2480   } else {
  2481     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  2482   }
  2483   return ret;
  2484 }
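// e.g. reg64_to_VMRegPair(O0): on a 64-bit VM the pair is just O0 occupying
// both halves (set2); on a 32-bit VM it is set_pair(hi, lo) with the
// successor register %o1 supplying the second 32-bit half.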
  2487 nmethod *SharedRuntime::generate_dtrace_nmethod(
  2488     MacroAssembler *masm, methodHandle method) {
  2491   // generate_dtrace_nmethod is guarded by a mutex so we are sure to
  2492   // be single threaded in this method.
  2493   assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
  2495   // Fill in the signature array, for the calling-convention call.
  2496   int total_args_passed = method->size_of_parameters();
  2498   BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  2499   VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
  2501   // The signature we are going to use for the trap that dtrace will see:
  2502   // java/lang/String is converted; we drop "this" and any other object
  2503   // is converted to NULL.  (A one-slot java/lang/Long object reference
  2504   // is converted to a two-slot long, which is why we double the allocation.)
  2505   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
  2506   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
  2508   int i=0;
  2509   int total_strings = 0;
  2510   int first_arg_to_pass = 0;
  2511   int total_c_args = 0;
  2513   // Skip the receiver as dtrace doesn't want to see it
  2514   if( !method->is_static() ) {
  2515     in_sig_bt[i++] = T_OBJECT;
  2516     first_arg_to_pass = 1;
  2517   }
  2519   SignatureStream ss(method->signature());
  2520   for ( ; !ss.at_return_type(); ss.next()) {
  2521     BasicType bt = ss.type();
  2522     in_sig_bt[i++] = bt;  // Collect remaining bits of signature
  2523     out_sig_bt[total_c_args++] = bt;
  2524     if( bt == T_OBJECT) {
  2525       Symbol* s = ss.as_symbol_or_null();
  2526       if (s == vmSymbols::java_lang_String()) {
  2527         total_strings++;
  2528         out_sig_bt[total_c_args-1] = T_ADDRESS;
  2529       } else if (s == vmSymbols::java_lang_Boolean() ||
  2530                  s == vmSymbols::java_lang_Byte()) {
  2531         out_sig_bt[total_c_args-1] = T_BYTE;
  2532       } else if (s == vmSymbols::java_lang_Character() ||
  2533                  s == vmSymbols::java_lang_Short()) {
  2534         out_sig_bt[total_c_args-1] = T_SHORT;
  2535       } else if (s == vmSymbols::java_lang_Integer() ||
  2536                  s == vmSymbols::java_lang_Float()) {
  2537         out_sig_bt[total_c_args-1] = T_INT;
  2538       } else if (s == vmSymbols::java_lang_Long() ||
  2539                  s == vmSymbols::java_lang_Double()) {
  2540         out_sig_bt[total_c_args-1] = T_LONG;
  2541         out_sig_bt[total_c_args++] = T_VOID;
             }
  2543     } else if ( bt == T_LONG || bt == T_DOUBLE ) {
  2544       in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  2545       // We convert double to long
  2546       out_sig_bt[total_c_args-1] = T_LONG;
  2547       out_sig_bt[total_c_args++] = T_VOID;
  2548     } else if ( bt == T_FLOAT) {
  2549       // We convert float to int
  2550       out_sig_bt[total_c_args-1] = T_INT;
           }
         }
  2554   assert(i==total_args_passed, "validly parsed signature");
  2556   // Now get the compiled-Java layout as input arguments
  2557   int comp_args_on_stack;
  2558   comp_args_on_stack = SharedRuntime::java_calling_convention(
  2559       in_sig_bt, in_regs, total_args_passed, false);
  2561   // We have received a description of where all the java args are located
  2562   // on entry to the wrapper. We need to convert these args to where
  2563   // a native (non-JNI) function would expect them. To figure out
  2564   // where they go we convert the java signature to a C signature and remove
  2565   // T_VOID for any long/double we might have received.
  2568   // Now figure out where the args must be stored and how much stack space
  2569   // they require (neglecting out_preserve_stack_slots but including space
  2570   // for storing the first six register arguments). It's weird; see int_stk_helper.
  2571   //
  2572   int out_arg_slots;
  2573   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
  2575   // Calculate the total number of stack slots we will need.
  2577   // First count the abi requirement plus all of the outgoing args
  2578   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
  2580   // Plus a temp for possible conversion of float/double/long register args
  2582   int conversion_temp = stack_slots;
  2583   stack_slots += 2;
  2586   // Now space for the string(s) we must convert
  2588   int string_locs = stack_slots;
  2589   stack_slots += total_strings *
  2590                    (max_dtrace_string_size / VMRegImpl::stack_slot_size);
  2592   // OK, the space we have allocated will look like:
  2593   //
  2594   //
  2595   // FP-> |                     |
  2596   //      |---------------------|
  2597   //      | string[n]           |
  2598   //      |---------------------| <- string_locs[n]
  2599   //      | string[n-1]         |
  2600   //      |---------------------| <- string_locs[n-1]
  2601   //      | ...                 |
  2602   //      | ...                 |
  2603   //      |---------------------| <- string_locs[1]
  2604   //      | string[0]           |
  2605   //      |---------------------| <- string_locs[0]
  2606   //      | temp                |
  2607   //      |---------------------| <- conversion_temp
  2608   //      | outbound memory     |
  2609   //      | based arguments     |
  2610   //      |                     |
  2611   //      |---------------------|
  2612   //      |                     |
  2613   // SP-> | out_preserved_slots |
  2614   //
  2615   //
  2617   // Now compute actual number of stack words we need rounding to make
  2618   // stack properly aligned.
  2619   stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
  2621   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
  2623   intptr_t start = (intptr_t)__ pc();
  2625   // First thing make an ic check to see if we should even be here
         {
  2628     Label L;
  2629     const Register temp_reg = G3_scratch;
  2630     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
  2631     __ verify_oop(O0);
  2632     __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
  2633     __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
  2635     __ jump_to(ic_miss, temp_reg);
  2636     __ delayed()->nop();
  2637     __ align(CodeEntryAlignment);
  2638     __ bind(L);
         }
  2641   int vep_offset = ((intptr_t)__ pc()) - start;
  2644   // The instruction at the verified entry point must be 5 bytes or longer
  2645   // because it can be patched on the fly by make_non_entrant. The stack bang
  2646   // instruction fits that requirement.
  2648   // Generate stack overflow check before creating frame
  2649   __ generate_stack_overflow_check(stack_size);
  2651   assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
  2652          "valid size for make_non_entrant");
  2654   // Generate a new frame for the wrapper.
  2655   __ save(SP, -stack_size, SP);
  2657   // Frame is now completed as far as size and linkage.
  2659   int frame_complete = ((intptr_t)__ pc()) - start;
  2661 #ifdef ASSERT
  2662   bool reg_destroyed[RegisterImpl::number_of_registers];
  2663   bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  2664   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
  2665     reg_destroyed[r] = false;
         }
  2667   for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
  2668     freg_destroyed[f] = false;
         }
  2671 #endif /* ASSERT */
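       // A VMRegPair view of the hardwired zero register G0.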
  2673   VMRegPair zero;
  2674   const Register g0 = G0; // without this we get a compiler warning (why??)
  2675   zero.set2(g0->as_VMReg());
  2677   int c_arg, j_arg;
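       // A stack offset too big for a 13-bit immediate is materialized once
       // into L6, and that register is then reused for later conversions.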
  2679   Register conversion_off = noreg;
  2681   for (j_arg = first_arg_to_pass, c_arg = 0 ;
  2682        j_arg < total_args_passed ; j_arg++, c_arg++ ) {
  2684     VMRegPair src = in_regs[j_arg];
  2685     VMRegPair dst = out_regs[c_arg];
  2687 #ifdef ASSERT
  2688     if (src.first()->is_Register()) {
  2689       assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
  2690     } else if (src.first()->is_FloatRegister()) {
  2691       assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
  2692                                                FloatRegisterImpl::S)], "ack!");
           }
  2694     if (dst.first()->is_Register()) {
  2695       reg_destroyed[dst.first()->as_Register()->encoding()] = true;
  2696     } else if (dst.first()->is_FloatRegister()) {
  2697       freg_destroyed[dst.first()->as_FloatRegister()->encoding(
  2698                                                  FloatRegisterImpl::S)] = true;
           }
  2700 #endif /* ASSERT */
  2702     switch (in_sig_bt[j_arg]) {
  2703       case T_ARRAY:
  2704       case T_OBJECT:
               {
  2706           if (out_sig_bt[c_arg] == T_BYTE  || out_sig_bt[c_arg] == T_SHORT ||
  2707               out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
  2708             // need to unbox a one-slot value
  2709             Register in_reg = L0;
  2710             Register tmp = L2;
  2711             if ( src.first()->is_reg() ) {
  2712               in_reg = src.first()->as_Register();
  2713             } else {
  2714               assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
  2715                      "must be");
  2716               __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
                   }
  2718             // If the final destination is an acceptable register
  2719             if ( dst.first()->is_reg() ) {
  2720               if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
  2721                 tmp = dst.first()->as_Register();
                     }
                   }
  2725             Label skipUnbox;
  2726             if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
  2727               __ mov(G0, tmp->successor());
                   }
  2729             __ br_null(in_reg, true, Assembler::pn, skipUnbox);
  2730             __ delayed()->mov(G0, tmp);
  2732             BasicType bt = out_sig_bt[c_arg];
  2733             int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
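                   // load the boxed primitive's value field, sized by its type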
  2734             switch (bt) {
  2735                 case T_BYTE:
  2736                   __ ldub(in_reg, box_offset, tmp); break;
  2737                 case T_SHORT:
  2738                   __ lduh(in_reg, box_offset, tmp); break;
  2739                 case T_INT:
  2740                   __ ld(in_reg, box_offset, tmp); break;
  2741                 case T_LONG:
  2742                   __ ld_long(in_reg, box_offset, tmp); break;
  2743                 default: ShouldNotReachHere();
                   }
  2746             __ bind(skipUnbox);
  2747             // If tmp wasn't final destination copy to final destination
  2748             if (tmp == L2) {
  2749               VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
  2750               if (out_sig_bt[c_arg] == T_LONG) {
  2751                 long_move(masm, tmp_as_VM, dst);
  2752               } else {
  2753                 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
                     }
                   }
  2756             if (out_sig_bt[c_arg] == T_LONG) {
  2757               assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
  2758               ++c_arg; // move over the T_VOID to keep the loop indices in sync
                   }
  2760           } else if (out_sig_bt[c_arg] == T_ADDRESS) {
  2761             Register s =
  2762                 src.first()->is_reg() ? src.first()->as_Register() : L2;
  2763             Register d =
  2764                 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
  2766             // We store the oop now so that the conversion pass can reach it
  2767             // while in the inner frame. This will be the only store if
  2768             // the oop is NULL.
  2769             if (s != L2) {
  2770               // src is register
  2771               if (d != L2) {
  2772                 // dst is register
  2773                 __ mov(s, d);
  2774               } else {
  2775                 assert(Assembler::is_simm13(reg2offset(dst.first()) +
  2776                           STACK_BIAS), "must be");
  2777                 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
                     }
  2779             } else {
  2780                 // src not a register
  2781                 assert(Assembler::is_simm13(reg2offset(src.first()) +
  2782                            STACK_BIAS), "must be");
  2783                 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
  2784                 if (d == L2) {
  2785                   assert(Assembler::is_simm13(reg2offset(dst.first()) +
  2786                              STACK_BIAS), "must be");
  2787                   __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
                       }
                   }
  2790           } else if (out_sig_bt[c_arg] != T_VOID) {
  2791             // Convert the arg to NULL
  2792             if (dst.first()->is_reg()) {
  2793               __ mov(G0, dst.first()->as_Register());
  2794             } else {
  2795               assert(Assembler::is_simm13(reg2offset(dst.first()) +
  2796                          STACK_BIAS), "must be");
  2797               __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
                   }
                 }
               }
  2801         break;
  2802       case T_VOID:
  2803         break;
  2805       case T_FLOAT:
  2806         if (src.first()->is_stack()) {
  2807           // Stack to stack/reg is simple
  2808           move32_64(masm, src, dst);
  2809         } else {
  2810           if (dst.first()->is_reg()) {
  2811             // freg -> reg
  2812             int off =
  2813               STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
  2814             Register d = dst.first()->as_Register();
  2815             if (Assembler::is_simm13(off)) {
  2816               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
  2817                      SP, off);
  2818               __ ld(SP, off, d);
  2819             } else {
  2820               if (conversion_off == noreg) {
  2821                 __ set(off, L6);
  2822                 conversion_off = L6;
                     }
  2824               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
  2825                      SP, conversion_off);
  2826               __ ld(SP, conversion_off, d);
                   }
  2828           } else {
  2829             // freg -> mem
  2830             int off = STACK_BIAS + reg2offset(dst.first());
  2831             if (Assembler::is_simm13(off)) {
  2832               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
  2833                      SP, off);
  2834             } else {
  2835               if (conversion_off == noreg) {
  2836                 __ set(off, L6);
  2837                 conversion_off = L6;
                     }
  2839               __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
  2840                      SP, conversion_off);
                   }
                 }
               }
  2844         break;
  2846       case T_DOUBLE:
  2847         assert( j_arg + 1 < total_args_passed &&
  2848                 in_sig_bt[j_arg + 1] == T_VOID &&
  2849                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
  2850         if (src.first()->is_stack()) {
  2851           // Stack to stack/reg is simple
  2852           long_move(masm, src, dst);
  2853         } else {
  2854           Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
  2856           // Destination could be an odd reg on 32bit in which case
  2857           // we can't load directly into the destination.
  2859           if (!d->is_even() && wordSize == 4) {
  2860             d = L2;
                 }
  2862           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
  2863           if (Assembler::is_simm13(off)) {
  2864             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
  2865                    SP, off);
  2866             __ ld_long(SP, off, d);
  2867           } else {
  2868             if (conversion_off == noreg) {
  2869               __ set(off, L6);
  2870               conversion_off = L6;
                   }
  2872             __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
  2873                    SP, conversion_off);
  2874             __ ld_long(SP, conversion_off, d);
                 }
  2876           if (d == L2) {
  2877             long_move(masm, reg64_to_VMRegPair(L2), dst);
                 }
               }
  2880         break;
  2882       case T_LONG :
  2883         // 32bit can't do a split move of something like g1 -> O0, O1
  2884         // so use a memory temp
  2885         if (src.is_single_phys_reg() && wordSize == 4) {
  2886           Register tmp = L2;
  2887           if (dst.first()->is_reg() &&
  2888               (wordSize == 8 || dst.first()->as_Register()->is_even())) {
  2889             tmp = dst.first()->as_Register();
                 }
  2892           int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
  2893           if (Assembler::is_simm13(off)) {
  2894             __ stx(src.first()->as_Register(), SP, off);
  2895             __ ld_long(SP, off, tmp);
  2896           } else {
  2897             if (conversion_off == noreg) {
  2898               __ set(off, L6);
  2899               conversion_off = L6;
                   }
  2901             __ stx(src.first()->as_Register(), SP, conversion_off);
  2902             __ ld_long(SP, conversion_off, tmp);
                 }
  2905           if (tmp == L2) {
  2906             long_move(masm, reg64_to_VMRegPair(L2), dst);
                 }
  2908         } else {
  2909           long_move(masm, src, dst);
  2911         break;
  2913       case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
  2915       default:
  2916         move32_64(masm, src, dst);
           }
         }
  2921   // If we have any strings we must store any register-based arg to the stack.
  2922   // This includes any still-live floating point registers too.
  2924   if (total_strings > 0 ) {
  2926     // protect all the arg registers
  2927     __ save_frame(0);
  2928     __ mov(G2_thread, L7_thread_cache);
  2929     const Register L2_string_off = L2;
  2931     // Get first string offset
  2932     __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
  2934     for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
  2935       if (out_sig_bt[c_arg] == T_ADDRESS) {
  2937         VMRegPair dst = out_regs[c_arg];
  2938         const Register d = dst.first()->is_reg() ?
  2939             dst.first()->as_Register()->after_save() : noreg;
  2941         // It's a string; the oop was already copied to the out arg
  2942         // position.
  2943         if (d != noreg) {
  2944           __ mov(d, O0);
  2945         } else {
  2946           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
  2947                  "must be");
  2948           __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
               }
  2950         Label skip;
  2952         __ br_null(O0, false, Assembler::pn, skip);
  2953         __ delayed()->add(FP, L2_string_off, O1);
  2955         if (d != noreg) {
  2956           __ mov(O1, d);
  2957         } else {
  2958           assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
  2959                  "must be");
  2960           __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
               }
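               // O0 holds the (non-NULL) String oop, O1 the utf8 buffer carved
               // out of this frame's string area.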
  2963         __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
  2964                 relocInfo::runtime_call_type);
  2965         __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
  2967         __ bind(skip);
             }
           }
  2972     __ mov(L7_thread_cache, G2_thread);
  2973     __ restore();
         }
  2978   // OK, now we are done. Place the nop that dtrace wants in order to
  2979   // patch in the trap.
  2981   int patch_offset = ((intptr_t)__ pc()) - start;
  2983   __ nop();
  2986   // Return
  2988   __ ret();
  2989   __ delayed()->restore();
  2991   __ flush();
  2993   nmethod *nm = nmethod::new_dtrace_nmethod(
  2994       method, masm->code(), vep_offset, patch_offset, frame_complete,
  2995       stack_slots / VMRegImpl::slots_per_word);
  2996   return nm;
       }
  3000 #endif // HAVE_DTRACE_H
  3002 // This function returns the adjustment (in number of words) to a c2i adapter
  3003 // activation for use during deoptimization.
  3004 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  3005   assert(callee_locals >= callee_parameters,
  3006           "test and remove; got more parms than locals");
  3007   if (callee_locals < callee_parameters)
  3008     return 0;                   // No adjustment for negative locals
  3009   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  3010   return round_to(diff, WordsPerLong);
       }
  3013 // "Top of Stack" slots that may be unused by the calling convention but must
  3014 // otherwise be preserved.
  3015 // On Intel these are not necessary and the value can be zero.
  3016 // On Sparc this describes the words reserved for storing a register window
  3017 // when an interrupt occurs.
  3018 uint SharedRuntime::out_preserve_stack_slots() {
  3019   return frame::register_save_words * VMRegImpl::slots_per_word;
       }
  3022 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
  3023 //
  3024 // Common out the new frame generation for deopt and uncommon trap
  3025 //
  3026   Register        G3pcs              = G3_scratch; // Array of new pcs (input)
  3027   Register        Oreturn0           = O0;
  3028   Register        Oreturn1           = O1;
  3029   Register        O2UnrollBlock      = O2;
  3030   Register        O3array            = O3;         // Array of frame sizes (input)
  3031   Register        O4array_size       = O4;         // number of frames (input)
  3032   Register        O7frame_size       = O7;         // this frame's size (scratch)
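       // Load this frame's size from the size array, negate it, and push the
       // frame with a register-window save; then load the frame's new pc into I7.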
  3034   __ ld_ptr(O3array, 0, O7frame_size);
  3035   __ sub(G0, O7frame_size, O7frame_size);
  3036   __ save(SP, O7frame_size, SP);
  3037   __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
  3039   #ifdef ASSERT
  3040   // make sure that the frames are aligned properly
  3041 #ifndef _LP64
  3042   __ btst(wordSize*2-1, SP);
  3043   __ breakpoint_trap(Assembler::notZero);
  3044 #endif
  3045   #endif
  3047   // Deopt needs to pass some extra live values from frame to frame
  3049   if (deopt) {
  3050     __ mov(Oreturn0->after_save(), Oreturn0);
  3051     __ mov(Oreturn1->after_save(), Oreturn1);
         }
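       // Pull the unroll info down into the new frame and count off this frame.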
  3054   __ mov(O4array_size->after_save(), O4array_size);
  3055   __ sub(O4array_size, 1, O4array_size);
  3056   __ mov(O3array->after_save(), O3array);
  3057   __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  3058   __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
  3060   #ifdef ASSERT
  3061   // trash registers to show a clear pattern in backtraces
  3062   __ set(0xDEAD0000, I0);
  3063   __ add(I0,  2, I1);
  3064   __ add(I0,  4, I2);
  3065   __ add(I0,  6, I3);
  3066   __ add(I0,  8, I4);
  3067   // Don't touch I5; it could hold a valuable savedSP
  3068   __ set(0xDEADBEEF, L0);
  3069   __ mov(L0, L1);
  3070   __ mov(L0, L2);
  3071   __ mov(L0, L3);
  3072   __ mov(L0, L4);
  3073   __ mov(L0, L5);
  3075   // trash the return value as there is nothing to return yet
  3076   __ set(0xDEAD0001, O7);
  3077   #endif
  3079   __ mov(SP, O5_savedSP);
       }
  3083 static void make_new_frames(MacroAssembler* masm, bool deopt) {
  3084   //
  3085   // loop through the UnrollBlock info and create new frames
  3086   //
  3087   Register        G3pcs              = G3_scratch;
  3088   Register        Oreturn0           = O0;
  3089   Register        Oreturn1           = O1;
  3090   Register        O2UnrollBlock      = O2;
  3091   Register        O3array            = O3;
  3092   Register        O4array_size       = O4;
  3093   Label           loop;
  3095   // Before we make new frames, check to see if stack is available.
  3096   // Do this after the caller's return address is on top of stack
  3097   if (UseStackBanging) {
  3098     // Get total frame size for interpreted frames
  3099     __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
  3100     __ bang_stack_size(O4, O3, G3_scratch);
         }
  3103   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  3104   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  3105   __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
  3107   // Adjust old interpreter frame to make space for new frame's extra java locals
  3108   //
  3109   // We capture the original sp for the transition frame only because it is needed in
  3110   // order to properly calculate interpreter_sp_adjustment. Even though in real life
  3111   // every interpreter frame captures a savedSP it is only needed at the transition
  3112   // (fortunately). If we had to have it correct everywhere then we would need to
  3113   // be told the sp_adjustment for each frame we create. If the frame size array
  3114   // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  3115   // for each frame we create and keep up the illusion everywhere.
  3116   //
  3118   __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  3119   __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  3120   __ sub(SP, O7, SP);
  3122 #ifdef ASSERT
  3123   // make sure that there is at least one entry in the array
  3124   __ tst(O4array_size);
  3125   __ breakpoint_trap(Assembler::zero);
  3126 #endif
  3128   // Now push the new interpreter frames
  3129   __ bind(loop);
  3131   // allocate a new frame, filling the registers
  3133   gen_new_frame(masm, deopt);        // allocate an interpreter frame
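       // Loop until no frames remain; the delay slot advances to the next
       // frame-size entry.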
  3135   __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  3136   __ delayed()->add(O3array, wordSize, O3array);
  3137   __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
       }
  3141 //------------------------------generate_deopt_blob----------------------------
  3142 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
  3143 // instead.
  3144 void SharedRuntime::generate_deopt_blob() {
  3145   // allocate space for the code
  3146   ResourceMark rm;
  3147   // setup code generation tools
  3148   int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
  3149 #ifdef _LP64
  3150   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
  3151 #else
  3152   // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  3153   // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  3154   CodeBuffer buffer("deopt_blob", 1600+pad, 512);
  3155 #endif /* _LP64 */
  3156   MacroAssembler* masm               = new MacroAssembler(&buffer);
  3157   FloatRegister   Freturn0           = F0;
  3158   Register        Greturn1           = G1;
  3159   Register        Oreturn0           = O0;
  3160   Register        Oreturn1           = O1;
  3161   Register        O2UnrollBlock      = O2;
  3162   Register        L0deopt_mode       = L0;
  3163   Register        G4deopt_mode       = G4_scratch;
  3164   int             frame_size_words;
  3165   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
  3166 #if !defined(_LP64) && defined(COMPILER2)
  3167   Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
  3168 #endif
  3169   Label           cont;
  3171   OopMapSet *oop_maps = new OopMapSet();
  3173   //
  3174   // This is the entry point for code which is returning to a de-optimized
  3175   // frame.
  3176   // The steps taken by this frame are as follows:
  3177   //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  3178   //     and all potentially live registers (at a pollpoint many registers can be live).
  3179   //
  3180   //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  3181   //     returns information about the number and size of interpreter frames
  3182   //     which are equivalent to the frame which is being deoptimized)
  3183   //   - deallocate the unpack frame, restoring only result values. Other
  3184   //     volatile registers will now be captured in the vframeArray as needed.
  3185   //   - deallocate the deoptimization frame
  3186   //   - in a loop using the information returned in the previous step
  3187   //     push new interpreter frames (take care to propagate the return
  3188   //     values through each new frame pushed)
  3189   //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  3190   //   - call the C routine: Deoptimization::unpack_frames (this function
  3191   //     lays out values on the interpreter frame which was just created)
  3192   //   - deallocate the dummy unpack_frame
  3193   //   - ensure that all the return values are correctly set and then do
  3194   //     a return to the interpreter entry point
  3195   //
  3196   // Refer to the following methods for more information:
  3197   //   - Deoptimization::fetch_unroll_info
  3198   //   - Deoptimization::unpack_frames
  3200   OopMap* map = NULL;
  3202   int start = __ offset();
  3204   // restore G2, the trampoline destroyed it
  3205   __ get_thread();
  3207   // On entry we have been called by the deoptimized nmethod with a call that
  3208   // replaced the original call (or safepoint polling location) so the deoptimizing
  3209   // pc is now in O7. Return values are still in the expected places
  3211   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
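       // Record the unpack kind in the delay slot and join the common path.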
  3212   __ ba(cont);
  3213   __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
  3215   int exception_offset = __ offset() - start;
  3217   // restore G2, the trampoline destroyed it
  3218   __ get_thread();
  3220   // On entry we have been jumped to by the exception handler (or exception_blob
  3221   // for server).  O0 contains the exception oop and O7 contains the original
  3222   // exception pc.  So if we push a frame here it will look to the
  3223   // stack walking code (fetch_unroll_info) just like a normal call so
  3224   // state will be extracted normally.
  3226   // save exception oop in JavaThread and fall through into the
  3227   // exception_in_tls case since they are handled in the same way except
  3228   // for where the pending exception is kept.
  3229   __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
  3231   //
  3232   // Vanilla deoptimization with an exception pending in exception_oop
  3233   //
  3234   int exception_in_tls_offset = __ offset() - start;
  3236   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
  3237   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  3239   // Restore G2_thread
  3240   __ get_thread();
  3242 #ifdef ASSERT
  3244     // verify that there is really an exception oop in exception_oop
  3245     Label has_exception;
  3246     __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
  3247     __ br_notnull_short(Oexception, Assembler::pt, has_exception);
  3248     __ stop("no exception in thread");
  3249     __ bind(has_exception);
  3251     // verify that there is no pending exception
  3252     Label no_pending_exception;
  3253     Address exception_addr(G2_thread, Thread::pending_exception_offset());
  3254     __ ld_ptr(exception_addr, Oexception);
  3255     __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
  3256     __ stop("must not have pending exception here");
  3257     __ bind(no_pending_exception);
  3259 #endif
  3261   __ ba(cont);
  3262   __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
  3264   //
  3265   // Reexecute entry, similar to c2 uncommon trap
  3266   //
  3267   int reexecute_offset = __ offset() - start;
  3269   // No need to update oop_map  as each call to save_live_registers will produce identical oopmap
  3270   (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  3272   __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
  3274   __ bind(cont);
  3276   __ set_last_Java_frame(SP, noreg);
  3278   // do the call by hand so we can get the oopmap
  3280   __ mov(G2_thread, L7_thread_cache);
  3281   __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  3282   __ delayed()->mov(G2_thread, O0);
  3284   // Set an oopmap for the call site; this describes all our saved volatile registers
  3286   oop_maps->add_gc_map( __ offset()-start, map);
  3288   __ mov(L7_thread_cache, G2_thread);
  3290   __ reset_last_Java_frame();
  3292   // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  3293   // so this move will survive
  3295   __ mov(L0deopt_mode, G4deopt_mode);
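       // Stash the UnrollBlock* returned in O0 up in I2 so it lands back in O2
       // when the deopt frame is popped below.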
  3297   __ mov(O0, O2UnrollBlock->after_save());
  3299   RegisterSaver::restore_result_registers(masm);
  3301   Label noException;
  3302   __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
  3304   // Move the pending exception from exception_oop to Oexception so
  3305   // the pending exception will be picked up by the interpreter.
  3306   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  3307   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  3308   __ bind(noException);
  3310   // deallocate the deoptimization frame taking care to preserve the return values
  3311   __ mov(Oreturn0,     Oreturn0->after_save());
  3312   __ mov(Oreturn1,     Oreturn1->after_save());
  3313   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  3314   __ restore();
  3316   // Allocate new interpreter frame(s) and possible c2i adapter frame
  3318   make_new_frames(masm, true);
  3320   // push a dummy "unpack_frame" taking care of float return values and
  3321   // call Deoptimization::unpack_frames to have the unpacker layout
  3322   // information in the interpreter frames just created and then return
  3323   // to the interpreter entry point
  3324   __ save(SP, -frame_size_words*wordSize, SP);
  3325   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
  3326 #if !defined(_LP64)
  3327 #if defined(COMPILER2)
  3328   // 32-bit 1-register longs return longs in G1
  3329   __ stx(Greturn1, saved_Greturn1_addr);
  3330 #endif
  3331   __ set_last_Java_frame(SP, noreg);
  3332   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
  3333 #else
  3334   // LP64 uses g4 in set_last_Java_frame
  3335   __ mov(G4deopt_mode, O1);
  3336   __ set_last_Java_frame(SP, G0);
  3337   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
  3338 #endif
  3339   __ reset_last_Java_frame();
  3340   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
  3342 #if !defined(_LP64) && defined(COMPILER2)
  3343   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  3344   // I0/I1 if the return value is long.
  3345   Label not_long;
  3346   __ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
  3347   __ ldd(saved_Greturn1_addr,I0);
  3348   __ bind(not_long);
  3349 #endif
  3350   __ ret();
  3351   __ delayed()->restore();
  3353   masm->flush();
  3354   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  3355   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
       }
  3358 #ifdef COMPILER2
  3360 //------------------------------generate_uncommon_trap_blob--------------------
  3361 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
  3362 // instead.
  3363 void SharedRuntime::generate_uncommon_trap_blob() {
  3364   // allocate space for the code
  3365   ResourceMark rm;
  3366   // setup code generation tools
  3367   int pad = VerifyThread ? 512 : 0;
  3368 #ifdef _LP64
  3369   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
  3370 #else
  3371   // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  3372   // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  3373   CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
  3374 #endif
  3375   MacroAssembler* masm               = new MacroAssembler(&buffer);
  3376   Register        O2UnrollBlock      = O2;
  3377   Register        O2klass_index      = O2;
  3379   //
  3380   // This is the entry point for all traps the compiler takes when it thinks
  3381   // it cannot handle further execution of compiled code. The frame is
  3382   // deoptimized in these cases and converted into interpreter frames for
  3383   // execution
  3384   // The steps taken by this frame are as follows:
  3385   //   - push a fake "unpack_frame"
  3386   //   - call the C routine Deoptimization::uncommon_trap (this function
  3387   //     packs the current compiled frame into vframe arrays and returns
  3388   //     information about the number and size of interpreter frames which
  3389   //     are equivalent to the frame which is being deoptimized)
  3390   //   - deallocate the "unpack_frame"
  3391   //   - deallocate the deoptimization frame
  3392   //   - in a loop using the information returned in the previous step
  3393   //     push interpreter frames;
  3394   //   - create a dummy "unpack_frame"
  3395   //   - call the C routine: Deoptimization::unpack_frames (this function
  3396   //     lays out values on the interpreter frame which was just created)
  3397   //   - deallocate the dummy unpack_frame
  3398   //   - return to the interpreter entry point
  3399   //
  3400   //  Refer to the following methods for more information:
  3401   //   - Deoptimization::uncommon_trap
  3402   //   - Deoptimization::unpack_frame
  3404   // the unloaded class index is in O0 (first parameter to this blob)
  3406   // push a dummy "unpack_frame"
  3407   // and call Deoptimization::uncommon_trap to pack the compiled frame into
  3408   // vframe array and return the UnrollBlock information
  3409   __ save_frame(0);
  3410   __ set_last_Java_frame(SP, noreg);
  3411   __ mov(I0, O2klass_index);
  3412   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  3413   __ reset_last_Java_frame();
  3414   __ mov(O0, O2UnrollBlock->after_save());
  3415   __ restore();
  3417   // deallocate the deoptimized frame taking care to preserve the return values
  3418   __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  3419   __ restore();
  3421   // Allocate new interpreter frame(s) and possible c2i adapter frame
  3423   make_new_frames(masm, false);
  3425   // push a dummy "unpack_frame" taking care of float return values and
  3426   // call Deoptimization::unpack_frames to have the unpacker layout
  3427   // information in the interpreter frames just created and then return
  3428   // to the interpreter entry point
  3429   __ save_frame(0);
  3430   __ set_last_Java_frame(SP, noreg);
  3431   __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  3432   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  3433   __ reset_last_Java_frame();
  3434   __ ret();
  3435   __ delayed()->restore();
  3437   masm->flush();
  3438   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
       }
  3441 #endif // COMPILER2
  3443 //------------------------------generate_handler_blob-------------------
  3444 //
  3445 // Generate a special Compile2Runtime blob that saves all registers, and sets
  3446 // up an OopMap.
  3447 //
  3448 // This blob is jumped to (via a breakpoint and the signal handler) from a
  3449 // safepoint in compiled code.  On entry to this blob, O7 contains the
  3450 // address in the original nmethod at which we should resume normal execution.
  3451 // Thus, this blob looks like a subroutine which must preserve lots of
  3452 // registers and return normally.  Note that O7 is never register-allocated,
  3453 // so it is guaranteed to be free here.
  3454 //
  3456 // The hardest part of what this blob must do is to save the 64-bit %o
  3457 // registers in the 32-bit build.  A simple 'save' turns the %o's to %i's and
  3458 // an interrupt will chop off their heads.  Making space in the caller's frame
  3459 // first will let us save the 64-bit %o's before save'ing, but we cannot hand
  3460 // the adjusted FP off to the GC stack-crawler: this will modify the caller's
  3461 // SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
  3462 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
  3463 // Tricky, tricky, tricky...
  3465 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
  3466   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  3468   // allocate space for the code
  3469   ResourceMark rm;
  3470   // setup code generation tools
  3471   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  3472   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  3473   // even larger with TraceJumps
  3474   int pad = TraceJumps ? 512 : 0;
  3475   CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  3476   MacroAssembler* masm                = new MacroAssembler(&buffer);
  3477   int             frame_size_words;
  3478   OopMapSet *oop_maps = new OopMapSet();
  3479   OopMap* map = NULL;
  3481   int start = __ offset();
  3483   // If this causes a return before the processing, then do a "restore"
  3484   if (cause_return) {
  3485     __ restore();
  3486   } else {
  3487     // Make it look like we were called via the poll
  3488     // so that frame constructor always sees a valid return address
  3489     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
  3490     __ sub(O7, frame::pc_return_offset, O7);
         }
  3493   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  3495   // setup last_Java_sp (blows G4)
  3496   __ set_last_Java_frame(SP, noreg);
  3498   // call into the runtime to handle the illegal instruction exception
  3499   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  3500   __ mov(G2_thread, O0);
  3501   __ save_thread(L7_thread_cache);
  3502   __ call(call_ptr);
  3503   __ delayed()->nop();
  3505   // Set an oopmap for the call site.
  3506   // We need this not only for callee-saved registers, but also for volatile
  3507   // registers that the compiler might be keeping live across a safepoint.
  3509   oop_maps->add_gc_map( __ offset() - start, map);
  3511   __ restore_thread(L7_thread_cache);
  3512   // clear last_Java_sp
  3513   __ reset_last_Java_frame();
  3515   // Check for exceptions
  3516   Label pending;
  3518   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  3519   __ br_notnull_short(O1, Assembler::pn, pending);
  3521   RegisterSaver::restore_live_registers(masm);
  3523   // We are back to the original state on entry and ready to go.
  3525   __ retl();
  3526   __ delayed()->nop();
  3528   // Pending exception after the safepoint
  3530   __ bind(pending);
  3532   RegisterSaver::restore_live_registers(masm);
  3534   // We are back to the original state on entry.
  3536   // Tail-call forward_exception_entry, with the issuing PC in O7,
  3537   // so it looks like the original nmethod called forward_exception_entry.
  3538   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  3539   __ JMP(O0, 0);
  3540   __ delayed()->nop();
  3542   // -------------
  3543   // make sure all code is generated
  3544   masm->flush();
  3546   // return the safepoint blob
  3547   return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
       }
  3550 //
  3551 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
  3552 //
  3553 // Generate a stub that calls into vm to find out the proper destination
  3554 // of a java call. All the argument registers are live at this point
  3555 // but since this is generic code we don't know what they are and the caller
  3556 // must do any gc of the args.
  3557 //
  3558 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  3559   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  3561   // allocate space for the code
  3562   ResourceMark rm;
  3563   // setup code generation tools
  3564   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  3565   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  3566   // even larger with TraceJumps
  3567   int pad = TraceJumps ? 512 : 0;
  3568   CodeBuffer buffer(name, 1600 + pad, 512);
  3569   MacroAssembler* masm                = new MacroAssembler(&buffer);
  3570   int             frame_size_words;
  3571   OopMapSet *oop_maps = new OopMapSet();
  3572   OopMap* map = NULL;
  3574   int start = __ offset();
  3576   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  3578   int frame_complete = __ offset();
  3580   // setup last_Java_sp (blows G4)
  3581   __ set_last_Java_frame(SP, noreg);
  3583   // call into the runtime to find out the proper destination of the call
  3584   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  3585   __ mov(G2_thread, O0);
  3586   __ save_thread(L7_thread_cache);
  3587   __ call(destination, relocInfo::runtime_call_type);
  3588   __ delayed()->nop();
  3590   // O0 contains the address we are going to jump to assuming no exception got installed
  3592   // Set an oopmap for the call site.
  3593   // We need this not only for callee-saved registers, but also for volatile
  3594   // registers that the compiler might be keeping live across a safepoint.
  3596   oop_maps->add_gc_map( __ offset() - start, map);
  3598   __ restore_thread(L7_thread_cache);
  3599   // clear last_Java_sp
  3600   __ reset_last_Java_frame();
  3602   // Check for exceptions
  3603   Label pending;
  3605   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  3606   __ br_notnull_short(O1, Assembler::pn, pending);
  3608   // get the returned methodOop
  3610   __ get_vm_result(G5_method);
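       // store it over the saved G5 slot so restore_live_registers reloads it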
  3611   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
  3613   // O0 is where we want to jump, overwrite G3 which is saved and scratch
  3615   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
  3617   RegisterSaver::restore_live_registers(masm);
  3619   // We are back to the original state on entry and ready to go.
  3621   __ JMP(G3, 0);
  3622   __ delayed()->nop();
  3624   // Pending exception after the safepoint
  3626   __ bind(pending);
  3628   RegisterSaver::restore_live_registers(masm);
  3630   // We are back to the original state on entry.
  3632   // Tail-call forward_exception_entry, with the issuing PC in O7,
  3633   // so it looks like the original nmethod called forward_exception_entry.
  3634   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  3635   __ JMP(O0, 0);
  3636   __ delayed()->nop();
  3638   // -------------
  3639   // make sure all code is generated
  3640   masm->flush();
  3642   // return the blob
  3643   // frame_size_words or bytes??
  3644   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
       }
