src/cpu/ppc/vm/sharedRuntime_ppc.cpp

author:      goetz
date:        Wed, 11 Dec 2013 00:06:11 +0100
changeset:   6495 (67fa91961822)
parent:      6486 (b0133e4187d3)
child:       6511 (31e80afe3fed)
permissions: -rw-r--r--

8029940: PPC64 (part 122): C2 compiler port
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Used by generate_deopt_blob.  Defined in .ad file.
extern uint size_deopt_handler();

class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_r4,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
                         int* out_frame_size_in_bytes,
                         bool generate_oop_map,
                         int return_pc_adjustment,
                         ReturnPCLocation return_pc_location);
  static void    restore_live_registers_and_pop_frame(MacroAssembler* masm,
                         int frame_size_in_bytes,
                         bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                         Register r_temp,
                         int frame_size,
                         int total_args,
                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                         int frame_size,
                         int total_args,
                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg           = 0,
    float_reg         = 1,
    special_reg       = 2
  } RegisterType;

  typedef enum {
    reg_size          = 8,
    half_reg_size     = reg_size / 2
  } RegisterConstants;

  typedef struct {
    RegisterType        reg_type;
    int                 reg_num;
    VMReg               vmreg;
  } LiveRegType;
};
#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg( F0  ),
  RegisterSaver_LiveFloatReg( F1  ),
  RegisterSaver_LiveFloatReg( F2  ),
  RegisterSaver_LiveFloatReg( F3  ),
  RegisterSaver_LiveFloatReg( F4  ),
  RegisterSaver_LiveFloatReg( F5  ),
  RegisterSaver_LiveFloatReg( F6  ),
  RegisterSaver_LiveFloatReg( F7  ),
  RegisterSaver_LiveFloatReg( F8  ),
  RegisterSaver_LiveFloatReg( F9  ),
  RegisterSaver_LiveFloatReg( F10 ),
  RegisterSaver_LiveFloatReg( F11 ),
  RegisterSaver_LiveFloatReg( F12 ),
  RegisterSaver_LiveFloatReg( F13 ),
  RegisterSaver_LiveFloatReg( F14 ),
  RegisterSaver_LiveFloatReg( F15 ),
  RegisterSaver_LiveFloatReg( F16 ),
  RegisterSaver_LiveFloatReg( F17 ),
  RegisterSaver_LiveFloatReg( F18 ),
  RegisterSaver_LiveFloatReg( F19 ),
  RegisterSaver_LiveFloatReg( F20 ),
  RegisterSaver_LiveFloatReg( F21 ),
  RegisterSaver_LiveFloatReg( F22 ),
  RegisterSaver_LiveFloatReg( F23 ),
  RegisterSaver_LiveFloatReg( F24 ),
  RegisterSaver_LiveFloatReg( F25 ),
  RegisterSaver_LiveFloatReg( F26 ),
  RegisterSaver_LiveFloatReg( F27 ),
  RegisterSaver_LiveFloatReg( F28 ),
  RegisterSaver_LiveFloatReg( F29 ),
  RegisterSaver_LiveFloatReg( F30 ),
  RegisterSaver_LiveFloatReg( F31 ),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg(   R0  ),
  //RegisterSaver_LiveIntReg( R1  ), // stack pointer
  RegisterSaver_LiveIntReg(   R2  ),
  RegisterSaver_LiveIntReg(   R3  ),
  RegisterSaver_LiveIntReg(   R4  ),
  RegisterSaver_LiveIntReg(   R5  ),
  RegisterSaver_LiveIntReg(   R6  ),
  RegisterSaver_LiveIntReg(   R7  ),
  RegisterSaver_LiveIntReg(   R8  ),
  RegisterSaver_LiveIntReg(   R9  ),
  RegisterSaver_LiveIntReg(   R10 ),
  RegisterSaver_LiveIntReg(   R11 ),
  RegisterSaver_LiveIntReg(   R12 ),
  //RegisterSaver_LiveIntReg( R13 ), // system thread id
  RegisterSaver_LiveIntReg(   R14 ),
  RegisterSaver_LiveIntReg(   R15 ),
  RegisterSaver_LiveIntReg(   R16 ),
  RegisterSaver_LiveIntReg(   R17 ),
  RegisterSaver_LiveIntReg(   R18 ),
  RegisterSaver_LiveIntReg(   R19 ),
  RegisterSaver_LiveIntReg(   R20 ),
  RegisterSaver_LiveIntReg(   R21 ),
  RegisterSaver_LiveIntReg(   R22 ),
  RegisterSaver_LiveIntReg(   R23 ),
  RegisterSaver_LiveIntReg(   R24 ),
  RegisterSaver_LiveIntReg(   R25 ),
  RegisterSaver_LiveIntReg(   R26 ),
  RegisterSaver_LiveIntReg(   R27 ),
  RegisterSaver_LiveIntReg(   R28 ),
  RegisterSaver_LiveIntReg(   R29 ),
  RegisterSaver_LiveIntReg(   R31 ),
  RegisterSaver_LiveIntReg(   R30 ), // r30 must be the last register
};
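// Editorial worked example (sketch): with the table above, the save-area size
// can be computed by hand. The table holds 1 special register (CTR), 32 float
// registers (F0-F31) and 30 integer registers (R0, R2-R12, R14-R31 without
// R13), i.e. 63 entries of reg_size == 8 bytes each:
//
//   register_save_size  = 63 * 8 = 504 bytes
//   frame_size_in_bytes = round_to(504, frame::alignment_in_bytes)
//                         + frame::abi_112_size
//                       = 512 + 112 = 624 bytes
//
// The concrete values used here (16-byte alignment, 112-byte ABI area) are
// assumptions of this sketch; the code below derives everything from the
// frame:: constants instead of hard-coding them.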
OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
                         int* out_frame_size_in_bytes,
                         bool generate_oop_map,
                         int return_pc_adjustment,
                         ReturnPCLocation return_pc_location) {
  // Push an abi112-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.

  int i;
  int offset;

  // Calculate the frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_112_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");

  // Save r30 in the last slot of the not yet pushed frame so that we
  // can use it as scratch reg.
  __ std(R30, -reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // Save the flags.
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr: __ mflr(R30); break;
    case return_pc_is_r4: __ mr(R30, R4); break;
    case return_pc_is_thread_saved_exception_pc:
                          __ ld(R30, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_adjustment != 0)
    __ addi(R30, R30, return_pc_adjustment);
  __ std(R30, _abi(lr), R1_SP);

  // Push a new frame.
  __ push_frame(frame_size_in_bytes, R30);

  // Save all registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) { // We spilled R30 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");

  // And we're done.
  return map;
}
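// Editorial usage sketch (hypothetical caller): the save/restore helpers are
// meant to be paired, as the runtime blobs generated later in this file do.
// Assuming a MacroAssembler* masm is in scope:
//
//   int frame_size_in_bytes;
//   OopMap* map = RegisterSaver::push_frame_abi112_and_save_live_registers(
//       masm, &frame_size_in_bytes, /*generate_oop_map=*/true,
//       /*return_pc_adjustment=*/0, RegisterSaver::return_pc_is_lr);
//   // ... call into the VM; `map` records where each register was spilled ...
//   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes,
//                                                       /*restore_ctr=*/true);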
// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // Restore all registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) // R30 is restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R30, offset, R1_SP);
            __ mtctr(R30);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // Pop the frame.
  __ pop_frame();

  // Restore the flags.
  __ restore_LR_CR(R30);

  // Restore the scratch register's value.
  __ ld(R30, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}
void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL)
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  __ pop_frame();
}
// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // Restore all result registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}
// Is the vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  ResourceMark rm;
  // Note, MaxVectorSize == 8 on PPC64.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif
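// Editorial worked example (values hypothetical): for a VMReg denoting stack
// slot 3 and an out-preserve area of, say, 14 slots,
//
//   reg2slot(r)   = 3 + 14 = 17                                   // 4-byte slots
//   reg2offset(r) = 17 * VMRegImpl::stack_slot_size = 17 * 4 = 68 // bytes
//
// i.e. both helpers translate compiler stack slots into SP-relative positions
// beyond the preserved ABI area. The 14-slot preserve size is an assumption of
// this example; the code uses SharedRuntime::out_preserve_stack_slots().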
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Register values in the range
// [0, RegisterImpl::number_of_registers) are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course, for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack. Float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (ireg < num_java_iarg_registers) {
        // Put int/ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put int/ptr on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (ireg < num_java_iarg_registers) {
        // Put long in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put long on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (ireg < num_java_iarg_registers) {
        // Put ptr in register.
        reg = java_iarg_reg[ireg];
        ++ireg;
      } else {
        // Put ptr on stack. Objects must be aligned to 2 slots too,
        // because "64-bit pointers record oop-ishness on 2 aligned
        // adjacent registers." (see OopFlow::build_oop_map).
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_FLOAT:
      if (freg < num_java_farg_registers) {
        // Put float in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < num_java_farg_registers) {
        // Put double in register.
        reg = java_farg_reg[freg];
        ++freg;
      } else {
        // Put double on stack. They must be aligned to 2 slots.
        if (stk & 0x1) ++stk;
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;
    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
    }
  }
  return round_to(stk, 2);
}
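// Editorial worked example (sketch): for a Java signature (int, long, double,
// float) the loop above assigns, per java_iarg_reg/java_farg_reg:
//
//   sig_bt  = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID, T_FLOAT }
//   regs[0] = R3 (set1)   // first int arg register
//   regs[1] = R4 (set2)   // second int arg register; the T_VOID half is BAD
//   regs[3] = F1 (set2)   // first float arg register
//   regs[5] = F2 (set1)   // second float arg register
//
// Only after 8 int/ptr/long or 13 float/double arguments does `stk` start to
// grow; the return value round_to(stk, 2) is then the outgoing stack area in
// 4-byte slots, aligned to 2 slots.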
#ifdef COMPILER2
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  // Check calling conventions consistency.
  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_112.
  int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");

  // We fill out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {

    //
    // If arguments 0-7 are integers, they are passed in integer registers.
    // Argument i is placed in iarg_reg[i].
    //
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      // We must cast ints to longs and use full 64 bit stack slots
      // here. We do the cast in GraphKit::gen_stub() and just guard
      // here against losing that change.
      assert(CCallingConventionRequiresIntsAsLongs,
             "argument of type int should be promoted to type long");
      guarantee(i > 0 && sig_bt[i-1] == T_LONG,
                "argument of type (bt) should have been promoted to type (T_LONG,bt) for bt in "
                "{T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    case T_LONG:
      guarantee(sig_bt[i+1] == T_VOID    ||
                sig_bt[i+1] == T_BOOLEAN || sig_bt[i+1] == T_CHAR  ||
                sig_bt[i+1] == T_BYTE    || sig_bt[i+1] == T_SHORT ||
                sig_bt[i+1] == T_INT,
                "expecting type (T_LONG,half) or type (T_LONG,bt) with bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
      // Fall through: longs and pointers share the integer-register path.
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      // Oops are already boxed if required (JNI).
      if (arg < Argument::n_int_register_parameters_c) {
        reg = iarg_reg[arg];
      } else {
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    //
    // Floats are treated differently from int regs: The first 13 float arguments
    // are passed in registers (not the float args among the first 13 args).
    // Thus argument i is NOT passed in farg_reg[i] if it is float. It is passed
    // in farg_reg[j] if argument i is the j-th float argument of this call.
    //
    case T_FLOAT:
      if (freg < Argument::n_float_register_parameters_c) {
        // Put float in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a float arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
          regs2[i].set1(reg2);
          stk += inc_stk_for_intfloat;
        }

      } else {
        // Put float on stack.
        reg = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
        stk += inc_stk_for_intfloat;
      }
      regs[i].set1(reg);
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (freg < Argument::n_float_register_parameters_c) {
        // Put double in register ...
        reg = farg_reg[freg];
        ++freg;

        // Argument i for i > 8 is placed on the stack even if it's
        // placed in a register (if it's a double arg). Aix disassembly
        // shows that xlC places these float args on the stack AND in
        // a register. This is not documented, but we follow this
        // convention, too.
        if (arg >= Argument::n_regs_not_on_stack_c) {
          // ... and on the stack.
          guarantee(regs2 != NULL, "must pass float in register and stack slot");
          VMReg reg2 = VMRegImpl::stack2reg(stk);
          regs2[i].set2(reg2);
          stk += inc_stk_for_longdouble;
        }
      } else {
        // Put double on stack.
        reg = VMRegImpl::stack2reg(stk);
        stk += inc_stk_for_longdouble;
      }
      regs[i].set2(reg);
      break;

    case T_VOID:
      // Do not count halves.
      regs[i].set_bad();
      --arg;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  return round_to(stk, 2);
}
#endif // COMPILER2
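// Editorial note (sketch): the int-as-long promotion guarded above means the
// signature stream for a C call never contains a lone T_INT on this platform;
// per the assertions, GraphKit::gen_stub() rewrites an int argument into the
// pair (T_LONG, T_INT) so that it occupies a full 64-bit slot. A call taking
// (jint, jdouble) therefore arrives here as
//
//   sig_bt = { T_LONG, T_INT, T_DOUBLE, T_VOID }
//
// where the T_INT entry is the "half" that is skipped (regs[i].set_bad()).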
static address gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& call_interpreter,
                            const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);

  // regular (verified) c2i entry point
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);

  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);

  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64 bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if the interpreter was doing it.

#ifdef CC_INTERP
  const Register tos = R17_tos;
#endif

  // load TOS
  __ addi(tos, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}
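// Editorial layout sketch (assuming 8-byte words): the loop above fills the
// interpreter's outgoing argument area top-down, starting at
// st_off = adapter_size - wordSize. For two args (long, float) the adapter
// frame looks roughly like
//
//   R1_SP + adapter_size - 1*wordSize : debug-only zero   \  T_LONG uses
//   R1_SP + adapter_size - 2*wordSize : long value        /  2 slots
//   R1_SP + adapter_size - 3*wordSize : float value (stfs)   1 slot
//   ...
//   tos = R1_SP + st_off              : next free slot, handed to the
//                                       interpreter as top-of-stack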
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Load method's entry-point from methodOop.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

#ifdef CC_INTERP
  const Register ld_ptr = R17_tos;
#endif
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the value registers.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64 bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store the value where the compiler expects it.
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT ||sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method oop");
  // Store method oop into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}
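// Editorial worked example for the frame resize above (sketch): with
// comp_args_on_stack == 5 c2 slots of 4 bytes each,
//
//   comp_words_on_stack = round_to(5 * 4, wordSize) >> LogBytesPerWord
//                       = round_to(20, 8) >> 3 = 24 >> 3 = 3 words
//   comp_words_on_stack = round_to(3, 2) = 4 words
//
// so the frame grows by 4 * 8 = 32 bytes, preserving the 16-byte stack
// alignment compiled code expects (wordSize == 8 assumed).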
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;

  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a compiledICHolder
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_method_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
#ifdef COMPILER2
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld(  r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop      = src.first()->as_Register();
    const Register r_handle   = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot        = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                                + oop_handle_offset; // in slots
    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}
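// Editorial sketch of the handle convention implemented above: the native
// callee receives either a pointer to a stack slot holding the oop, or NULL
// if the oop itself is NULL. In C terms the callee-visible contract is
// roughly
//
//   jobject arg = (oop_value != NULL) ? (jobject)&spill_slot : NULL;
//
// where spill_slot is the SP-relative location recorded in the OopMap so the
// GC can find and update the oop; `spill_slot` is a name invented for this
// sketch, not an identifier in this file.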
static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long-int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}
static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}
static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}
static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ lwz(R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ ld (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
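
// These two helpers bracket any VM call made after the native call returns,
// since such calls may clobber R3_RET/F1_RET. Typical usage in the wrapper
// below:
//   save_native_result(masm, ret_type, workspace_slot_offset);
//   __ call_VM_leaf(...);
//   restore_native_result(masm, ret_type, workspace_slot_offset);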

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double word first.
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
      if (map != NULL) {
        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      } else {
        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
      }
    } else if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
      }
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
    }
  }

  // Save or restore single word registers.
  for (int i = 0; i < total_in_args; i++) {
    // PPC64: pass ints as longs: must only deal with floats here.
    if (in_regs[i].first()->is_FloatRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
        if (map != NULL) {
          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        } else {
          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
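
// The single 'map' parameter doubles as the mode switch: called with a fresh
// OopMap the routine spills the argument registers (recording T_ARRAY slots
// as oops in the map); called with NULL it reloads them. Both calls must use
// the same arg_save_area so that the stores and loads line up.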

// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
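
// The check above implements the critical-native handshake: if a GC is
// pending, SharedRuntime::block_for_jni_critical() parks the thread until
// the GC has run, so no new JNI critical region can start while the
// GC_locker is waiting. See the comment block before
// generate_native_wrapper below.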

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if the array is null.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}
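
// Illustrative effect (sketch): for a non-null jbyteArray argument the
// callee receives the pair (jint length, jbyte* body), where body points
// just past the array header; for a null array it receives (0, NULL).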

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
#endif // COMPILER2

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to Java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, as it is impossible for
// them to be thrown.
//
// They are roughly structured like this:
//   if (GC_locker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//     call into JVM and possibly unlock the JNI critical
//     if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {
#ifdef COMPILER2
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
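
  // Method-handle intrinsics are not real native methods: the dispatch code
  // above stays entirely in the Java world, so the nmethod records no stack
  // slots, no receiver/lock offsets (-1), and no oop maps.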

  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // First, create the signature for the outgoing C call.
  // --------------------------------------------------------------------------

  int total_in_args = method->size_of_parameters();
  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).
  //
  // Additionally, on ppc64 we must convert integers to longs in the C
  // signature. We do this in advance in order to have no trouble with
  // indexes into the bt-arrays.
  // So convert the signature and registers now, and adjust the total number
  // of in-arguments accordingly.
  int i2l_argcnt = convert_ints_to_longints_argcnt(total_in_args, in_sig_bt); // PPC64: pass ints as longs.

  // Calculate the total number of C arguments and create arrays for the
  // signature and the outgoing registers.
  // On ppc64, we have two arrays for the outgoing registers, because
  // some floating-point arguments must be passed in registers _and_
  // in stack locations.
  bool method_is_static = method->is_static();
  int  total_c_args     = i2l_argcnt;

  if (!is_critical_native) {
    int n_hidden_args = method_is_static ? 2 : 1;
    total_c_args += n_hidden_args;
  } else {
    // No JNIEnv*, no this*, but unpacked arrays (base+length).
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args += 2; // PPC64: T_LONG, T_INT, T_ADDRESS (see convert_ints_to_longints and c_calling_convention)
      }
    }
  }

  BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair *out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  VMRegPair *out_regs2  = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  // Create the signature for the C call:
  //   1) add the JNIEnv*
  //   2) add the class if the method is static
  //   3) copy the rest of the incoming signature (shifted by the number of
  //      hidden arguments).

  int argc = 0;
  if (!is_critical_native) {
    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.

    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
    SignatureStream ss(method->signature());
    int o = 0;
    for (int i = 0; i < total_in_args ; i++, o++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair.
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[o] = T_BYTE; break;
            case 'C': in_elem_bt[o] = T_CHAR; break;
            case 'D': in_elem_bt[o] = T_DOUBLE; break;
            case 'F': in_elem_bt[o] = T_FLOAT; break;
            case 'I': in_elem_bt[o] = T_INT; break;
            case 'J': in_elem_bt[o] = T_LONG; break;
            case 'S': in_elem_bt[o] = T_SHORT; break;
            case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        in_elem_bt[o] = T_VOID;
        switch (in_sig_bt[i]) { // PPC64: pass ints as longs.
          case T_BOOLEAN:
          case T_CHAR:
          case T_BYTE:
          case T_SHORT:
          case T_INT: in_elem_bt[++o] = T_VOID; break;
          default: break;
        }
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
    assert(i2l_argcnt == o, "must match");

    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.

    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair.
        out_sig_bt[argc++] = T_LONG; // PPC64: pass ints as longs.
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
      }
    }
  }
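
  // Illustrative example (not taken from the code above): for a critical
  // native
  //   static native void m(int i, byte[] a);
  // the i2l conversion turns the incoming signature into
  //   (T_LONG, T_INT, T_ARRAY), i.e. i2l_argcnt == 3,
  // and the outgoing C signature becomes
  //   (T_LONG, T_INT, T_LONG, T_INT, T_ADDRESS), i.e. total_c_args == 5:
  // the int is widened to a long and the array is unpacked into a
  // (length, body pointer) pair, with the length itself widened to a long.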

  // Compute the wrapper's frame size.
  // --------------------------------------------------------------------------

  // Now figure out where the args must be stored and how much stack space
  // they require.
  //
  // Compute framesize for the wrapper. We need to handlize all oops in
  // incoming registers.
  //
  // Calculate the total number of stack slots we will need:
  //   1) abi requirements
  //   2) outgoing arguments
  //   3) space for inbound oop handle area
  //   4) space for handlizing a klass if static method
  //   5) space for a lock if synchronized method
  //   6) workspace for saving return values, int <-> float reg moves, etc.
  //   7) alignment
  //
  // Layout of the native wrapper frame:
  // (stack grows upwards, memory grows downwards)
  //
  // NW     [ABI_112]                  <-- 1) R1_SP
  //        [outgoing arguments]       <-- 2) R1_SP + out_arg_slot_offset
  //        [oopHandle area]           <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
  //        klass                      <-- 4) R1_SP + klass_offset
  //        lock                       <-- 5) R1_SP + lock_offset
  //        [workspace]                <-- 6) R1_SP + workspace_offset
  //        [alignment] (optional)     <-- 7)
  // caller [JIT_TOP_ABI_48]           <-- r_callers_sp
  //
  // - *_slot_offset indicates offset from SP in number of stack slots.
  // - *_offset      indicates offset from SP in bytes.

  int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2)
                  + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.

  // Now the space for the inbound oop handle area.
  int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  /*single_slots++;*/ break; // PPC64: pass ints as longs.
          case T_ARRAY:
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
  }

  int oop_handle_slot_offset = stack_slots;
  stack_slots += total_save_slots;                                                // 3)

  int klass_slot_offset = 0;
  int klass_offset      = -1;
  if (method_is_static && !is_critical_native) {                                  // 4)
    klass_slot_offset  = stack_slots;
    klass_offset       = klass_slot_offset * VMRegImpl::stack_slot_size;
    stack_slots       += VMRegImpl::slots_per_word;
  }

  int lock_slot_offset = 0;
  int lock_offset      = -1;
  if (method->is_synchronized()) {                                                // 5)
    lock_slot_offset   = stack_slots;
    lock_offset        = lock_slot_offset * VMRegImpl::stack_slot_size;
    stack_slots       += VMRegImpl::slots_per_word;
  }

  int workspace_slot_offset = stack_slots;                                        // 6)
  stack_slots         += 2;

  // Now compute the actual number of stack words we need.
  // Rounding makes the stack properly aligned.
  stack_slots = round_to(stack_slots,                                             // 7)
                         frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
  int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
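
  // Illustrative arithmetic (assuming 4-byte stack slots and 16-byte frame
  // alignment): round_to(stack_slots, 16 / 4) pads the slot count to a
  // multiple of 4, so frame_size_in_bytes comes out a multiple of 16.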

  // Now we can start generating code.
  // --------------------------------------------------------------------------

  intptr_t start_pc = (intptr_t)__ pc();
  intptr_t vep_start_pc;
  intptr_t frame_done_pc;
  intptr_t oopmap_pc;

  Label    ic_miss;
  Label    handle_pending_exception;

  Register r_callers_sp = R21;
  Register r_temp_1     = R22;
  Register r_temp_2     = R23;
  Register r_temp_3     = R24;
  Register r_temp_4     = R25;
  Register r_temp_5     = R26;
  Register r_temp_6     = R27;
  Register r_return_pc  = R28;

  Register r_carg1_jnienv        = noreg;
  Register r_carg2_classorobject = noreg;
  if (!is_critical_native) {
    r_carg1_jnienv        = out_regs[0].first()->as_Register();
    r_carg2_classorobject = out_regs[1].first()->as_Register();
  }

  // Generate the Unverified Entry Point (UEP).
  // --------------------------------------------------------------------------
  assert(start_pc == (intptr_t)__ pc(), "uep must be at start");

  // Check ic: object class == cached class?
  if (!method_is_static) {
    Register ic = as_Register(Matcher::inline_cache_reg_encode());
    Register receiver_klass = r_temp_1;

    __ cmpdi(CCR0, R3_ARG1, 0);
    __ beq(CCR0, ic_miss);
    __ verify_oop(R3_ARG1);
    __ load_klass(receiver_klass, R3_ARG1);

    __ cmpd(CCR0, receiver_klass, ic);
    __ bne(CCR0, ic_miss);
  }
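
  // The check above is the standard inline-cache test: the caller placed the
  // expected klass in the inline cache register; a null receiver or a klass
  // mismatch branches to the ic_miss stub (bound at the end of this
  // wrapper), which re-resolves the call site.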

  // Generate the Verified Entry Point (VEP).
  // --------------------------------------------------------------------------
  vep_start_pc = (intptr_t)__ pc();

  __ save_LR_CR(r_temp_1);
  __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
  __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
  __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
  frame_done_pc = (intptr_t)__ pc();

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will GC the arguments.
  // The only thing we need an oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself.
  OopMapSet *oop_maps = new OopMapSet();
  OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
  }

  // Move arguments from register/stack to register/stack.
  // --------------------------------------------------------------------------
  //
  // We immediately shuffle the arguments so that for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for them.
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in R16_thread) and, if static,
  // the class mirror instead of a receiver. This pretty much guarantees that
  // register layout will not match. We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.

  // Record sp-based slot for receiver on stack for non-static methods.
  int receiver_offset = -1;

  // We move the arguments backward because the floating point registers
  // destination will always be to a register with a greater or equal
  // register number or the stack.
  //   in  is the index of the incoming Java arguments
  //   out is the index of the outgoing C arguments

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
    freg_destroyed[f] = false;
  }
#endif // ASSERT

  for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {

#ifdef ASSERT
    if (in_regs[in].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[in].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
    }
    if (out_regs[out].first()->is_Register()) {
      reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
    }
    if (out_regs2[out].first()->is_Register()) {
      reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs2[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
    }
#endif // ASSERT

    switch (in_sig_bt[in]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        guarantee(in > 0 && in_sig_bt[in-1] == T_LONG,
                  "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
        break;
      case T_LONG:
        if (in_sig_bt[in+1] == T_VOID) {
          long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        } else {
          guarantee(in_sig_bt[in+1] == T_BOOLEAN || in_sig_bt[in+1] == T_CHAR  ||
                    in_sig_bt[in+1] == T_BYTE    || in_sig_bt[in+1] == T_SHORT ||
                    in_sig_bt[in+1] == T_INT,
                 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
          int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_ARRAY:
        if (is_critical_native) {
          int body_arg = out;
          out -= 2; // Point to length arg. PPC64: pass ints as longs.
          unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
                                r_callers_sp, r_temp_1, r_temp_2);
          break;
        } // else fall through to T_OBJECT
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, stack_slots,
                    oop_map, oop_handle_slot_offset,
                    ((in == 0) && (!method_is_static)), &receiver_offset,
                    in_regs[in], out_regs[out],
                    r_callers_sp, r_temp_1, r_temp_2);
        break;
      case T_VOID:
        break;
      case T_FLOAT:
        float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_DOUBLE:
        double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_ADDRESS:
        fatal("found type (T_ADDRESS) in java args");
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Pre-load a static method's oop into ARG2.
  // Used both by locking code and the normal JNI call code.
  if (method_is_static && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
                        r_carg2_classorobject);

    // Now handlize the static class mirror in carg2. It's known not-null.
    __ std(r_carg2_classorobject, klass_offset, R1_SP);
    oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ addi(r_carg2_classorobject, R1_SP, klass_offset);
  }

  // Get JNIEnv* which is first argument to native.
  if (!is_critical_native) {
    __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
  }

  // NOTE:
  //
  // We have all of the arguments set up at this point.
  // We MUST NOT touch any outgoing regs from this point on.
  // So if we must call out we must push a new frame.

  // Get current pc for oopmap, and load it patchable relative to global toc.
  oopmap_pc = (intptr_t) __ pc();
  __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);

  // We use the same pc/oopMap repeatedly when we call out.
  oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);

  // r_return_pc now has the pc loaded that we will use when we finally call
  // to native.

  // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
  assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");

# if 0
  // DTrace method entry
# endif

  // Lock a synchronized method.
  // --------------------------------------------------------------------------

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    ConditionRegister r_flag = CCR1;
    Register          r_oop  = r_temp_4;
    const Register    r_box  = r_temp_5;
    Label             done, locked;

    // Load the oop for the object or class. r_carg2_classorobject contains
    // either the handlized oop from the incoming arguments or the handlized
    // class mirror (if the method is static).
    __ ld(r_oop, 0, r_carg2_classorobject);

    // Get the lock box slot's address.
    __ addi(r_box, R1_SP, lock_offset);

#   ifdef ASSERT
    if (UseBiasedLocking) {
      // Making the box point to itself will make it clear it went unused
      // but also be obviously invalid.
      __ std(r_box, 0, r_box);
    }
#   endif // ASSERT

    // Try fastpath for locking.
    // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
    __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    __ beq(r_flag, locked);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter. Inline a special case of call_VM that
    // disallows any pending_exception.

    // Save argument registers and leave room for C-compatible ABI_112.
    int frame_size = frame::abi_112_size +
                     round_to(total_c_args * wordSize, frame::alignment_in_bytes);
    __ mr(R11_scratch1, R1_SP);
    RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);

    // Do the call.
    __ set_last_Java_frame(R11_scratch1, r_return_pc);
    assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
    __ reset_last_Java_frame();

    RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
       "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);

    __ bind(locked);
  }

  // Publish thread state
  // --------------------------------------------------------------------------

  // Use that pc we placed in r_return_pc a while back as the current frame anchor.
  __ set_last_Java_frame(R1_SP, r_return_pc);

  // Transition from _thread_in_Java to _thread_in_native.
  __ li(R0, _thread_in_native);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
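
  // The release barrier above orders all prior stores before the state
  // change becomes visible, so a VM thread that reads _thread_in_native
  // observes a consistent frame; the optional full fence covers
  // configurations (UseMembar) that rely on two-way barriers instead of the
  // serialization-page protocol used further below.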

  // The JNI call
  // --------------------------------------------------------------------------

  FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
  __ call_c(fd_native_method, relocInfo::runtime_call_type);


  // Now, we are back from the native code.


  // Unpack the native result.
  // --------------------------------------------------------------------------

  // For int-types, we do any needed sign extension.
  // Care must be taken that the return values (R3_RET and F1_RET)
  // will survive any VM calls for blocking or unlocking.
  // An OOP result (handle) is done specially in the slow-path code.

  switch (ret_type) {
    case T_VOID:    break;        // Nothing to do!
    case T_FLOAT:   break;        // Got it where we want it (unless slow-path).
    case T_DOUBLE:  break;        // Got it where we want it (unless slow-path).
    case T_LONG:    break;        // Got it where we want it (unless slow-path).
    case T_OBJECT:  break;        // Really a handle.
                                  // Cannot de-handlize until after reclaiming jvm_lock.
    case T_ARRAY:   break;

    case T_BOOLEAN: {             // 0 -> false(0); !0 -> true(1)
      Label skip_modify;
      __ cmpwi(CCR0, R3_RET, 0);
      __ beq(CCR0, skip_modify);
      __ li(R3_RET, 1);
      __ bind(skip_modify);
      break;
    }
    case T_BYTE: {                // sign extension
      __ extsb(R3_RET, R3_RET);
      break;
    }
    case T_CHAR: {                // unsigned result
      __ andi(R3_RET, R3_RET, 0xffff);
      break;
    }
    case T_SHORT: {               // sign extension
      __ extsh(R3_RET, R3_RET);
      break;
    }
    case T_INT:                   // nothing to do
      break;
    default:
      ShouldNotReachHere();
      break;
  }
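
  // The fix-ups above re-establish Java value semantics for sub-int results:
  // the 64-bit C calling convention only guarantees the low-order bits, so
  // booleans are canonicalized to 0/1, bytes and shorts are sign-extended,
  // and chars are zero-extended before the value is handed back to Java.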

  // Publish thread state
  // --------------------------------------------------------------------------

  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary because reading
  // and testing the synchronization state is not atomic w.r.t. GC, as this
  // scenario demonstrates:
  //   - Java thread A, in _thread_in_native state, loads _not_synchronized
  //     and is preempted.
  //   - VM thread changes sync state to synchronizing and suspends threads
  //     for GC.
  //   - Thread A is resumed to finish this native method, but doesn't block
  //     here since it didn't see any synchronization in progress, and escapes.

  // Transition from _thread_in_native to _thread_in_native_trans.
  __ li(R0, _thread_in_native_trans);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));


  // Must we block?
  // --------------------------------------------------------------------------

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  Label after_transition;
  {
    Label no_block, sync;

    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below.
        __ fence();
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
      }
    }

    Register sync_state_addr = r_temp_4;
    Register sync_state      = r_temp_5;
    Register suspend_flags   = r_temp_6;

    __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);

    // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
    __ lwz(sync_state, 0, sync_state_addr);

    // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
    __ lwz(suspend_flags, thread_(suspend_flags));

    __ acquire();

    Label do_safepoint;
    // No synchronization in progress nor yet synchronized.
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    // Not suspended.
    __ cmpwi(CCR1, suspend_flags, 0);

    __ bne(CCR0, sync);
    __ beq(CCR1, no_block);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    __ bind(sync);

    address entry_point = is_critical_native
      ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
      : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
    save_native_result(masm, ret_type, workspace_slot_offset);
    __ call_VM_leaf(entry_point, R16_thread);
    restore_native_result(masm, ret_type, workspace_slot_offset);

    if (is_critical_native) {
      __ b(after_transition); // No thread state transition here.
    }
    __ bind(no_block);
  }
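
  // Summary of the check above: CCR0 holds "safepoint in progress?" and CCR1
  // holds "suspend flags clear?"; a pending safepoint or a set suspend flag
  // routes through the sync path, which calls
  // check_special_condition_for_native_trans (the critical-native variant
  // also performs the state transition), otherwise we fall through to
  // no_block.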

  // Publish thread state.
  // --------------------------------------------------------------------------

  // Thread state is thread_in_native_trans. Any safepoint blocking has
  // already happened so we can now change state to _thread_in_Java.

  // Transition from _thread_in_native_trans to _thread_in_Java.
  __ li(R0, _thread_in_Java);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  __ bind(after_transition);

  // Reguard any pages if necessary.
  // --------------------------------------------------------------------------

  Label no_reguard;
  __ lwz(r_temp_1, thread_(stack_guard_state));
  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
  __ bne(CCR0, no_reguard);

  save_native_result(masm, ret_type, workspace_slot_offset);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, workspace_slot_offset);

  __ bind(no_reguard);

  // Unlock
  // --------------------------------------------------------------------------

  if (method->is_synchronized()) {
    ConditionRegister r_flag   = CCR1;
    const Register r_oop       = r_temp_4;
    const Register r_box       = r_temp_5;
    const Register r_exception = r_temp_6;
    Label done;

    // Get oop and address of lock object box.
    if (method_is_static) {
      assert(klass_offset != -1, "");
      __ ld(r_oop, klass_offset, R1_SP);
    } else {
      assert(receiver_offset != -1, "");
      __ ld(r_oop, receiver_offset, R1_SP);
    }
    __ addi(r_box, R1_SP, lock_offset);

    // Try fastpath for unlocking.
    __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    __ beq(r_flag, done);

    // Save and restore any potential method result value around the unlocking operation.
    save_native_result(masm, ret_type, workspace_slot_offset);

    // Must save pending exception around the slow-path VM call. Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld(r_exception, thread_(pending_exception));
    assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
    __ li(R0, 0);
    __ std(R0, thread_(pending_exception));

    // Slow case of monitor exit.
    // Inline a special case of call_VM that disallows any pending_exception.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
       "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);

    restore_native_result(masm, ret_type, workspace_slot_offset);

    // Check_forward_pending_exception jumps to forward_exception if any
    // pending exception is set. The forward_exception routine expects to see
    // the exception in pending_exception and not in a register. Kind of
    // clumsy, since all folks who branch to forward_exception must have
    // tested pending_exception first and hence have it in a register already.
    __ std(r_exception, thread_(pending_exception));

    __ bind(done);
  }

# if 0
  // DTrace method exit
# endif

  // Clear "last Java frame" SP and PC.
  // --------------------------------------------------------------------------

  __ reset_last_Java_frame();

  // Unpack oop result.
  // --------------------------------------------------------------------------

  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label skip_unboxing;
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, skip_unboxing);
    __ ld(R3_RET, 0, R3_RET);
    __ bind(skip_unboxing);
    __ verify_oop(R3_RET);
  }

  // Reset handle block.
  // --------------------------------------------------------------------------
  if (!is_critical_native) {
    __ ld(r_temp_1, thread_(active_handles));
    // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
    __ li(r_temp_2, 0);
    __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
  }

  // Check for pending exceptions.
  // --------------------------------------------------------------------------
  __ ld(r_temp_2, thread_(pending_exception));
  __ cmpdi(CCR0, r_temp_2, 0);
  __ bne(CCR0, handle_pending_exception);

  // Return
  // --------------------------------------------------------------------------

  __ pop_frame();
  __ restore_LR_CR(R11);
  __ blr();

  // Handler for pending exceptions (out-of-line).
  // --------------------------------------------------------------------------

  // Since this is a native call, we know the proper exception handler
  // is the empty function. We just pop this frame and then jump to
  // forward_exception_entry.
  if (!is_critical_native) {
    __ align(InteriorEntryAlignment);
    __ bind(handle_pending_exception);

    __ pop_frame();
    __ restore_LR_CR(R11);
    __ b64_patchable((address)StubRoutines::forward_exception_entry(),
                     relocInfo::runtime_call_type);
  }

  // Handler for a cache miss (out-of-line).
  // --------------------------------------------------------------------------

  if (!method_is_static) {
    __ align(InteriorEntryAlignment);
    __ bind(ic_miss);

    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
  }

  // Done.
  // --------------------------------------------------------------------------

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_start_pc - start_pc,
                                            frame_done_pc - start_pc,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;
#else
  ShouldNotReachHere();
  return NULL;
#endif // COMPILER2
}

// This function returns the adjustment size (in number of words) to a c2i
// adapter activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
}
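
// In other words: the c2i activation must grow by the space for the callee's
// non-parameter locals (the parameters already live in the caller's outgoing
// area), rounded up to keep the interpreter frame aligned.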

uint SharedRuntime::out_preserve_stack_slots() {
#ifdef COMPILER2
  return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
#else
  return 0;
#endif
}

#ifdef COMPILER2
// Frame generation for deopt and uncommon trap blobs.
static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
                                /* Read */
                                Register unroll_block_reg,
                                /* Update */
                                Register frame_sizes_reg,
                                Register number_of_frames_reg,
                                Register pcs_reg,
                                /* Invalidate */
                                Register frame_size_reg,
                                Register pc_reg) {

  __ ld(pc_reg, 0, pcs_reg);
  __ ld(frame_size_reg, 0, frame_sizes_reg);
  __ std(pc_reg, _abi(lr), R1_SP);
  __ push_frame(frame_size_reg, R0/*tmp*/);
#ifdef CC_INTERP
  __ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
#else
  Unimplemented();
#endif
  __ addi(number_of_frames_reg, number_of_frames_reg, -1);
  __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
  __ addi(pcs_reg, pcs_reg, wordSize);
}
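
// Each invocation above materializes one skeleton frame: it loads the
// frame's pc and size from the UnrollBlock arrays, stores the pc into the LR
// save slot so it becomes the return address once the new frame is popped,
// pushes the (still empty) frame, and then steps the counter and array
// pointers forward for the next iteration.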
  2616 // Loop through the UnrollBlock info and create new frames.
  2617 static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
  2618                                  /* read */
  2619                                  Register unroll_block_reg,
  2620                                  /* invalidate */
  2621                                  Register frame_sizes_reg,
  2622                                  Register number_of_frames_reg,
  2623                                  Register pcs_reg,
  2624                                  Register frame_size_reg,
  2625                                  Register pc_reg) {
  2626   Label loop;
  2628  // _number_of_frames is of type int (deoptimization.hpp)
  2629   __ lwa(number_of_frames_reg,
  2630              Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
  2631              unroll_block_reg);
  2632   __ ld(pcs_reg,
  2633             Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
  2634             unroll_block_reg);
  2635   __ ld(frame_sizes_reg,
  2636             Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
  2637             unroll_block_reg);
  2639   // stack: (caller_of_deoptee, ...).
  2641   // At this point we either have an interpreter frame or a compiled
  2642   // frame on top of stack. If it is a compiled frame we push a new c2i
  2643   // adapter here
  2645   // Memorize top-frame stack-pointer.
  2646   __ mr(frame_size_reg/*old_sp*/, R1_SP);
  2648   // Resize interpreter top frame OR C2I adapter.
  2650   // At this moment, the top frame (which is the caller of the deoptee) is
  2651   // an interpreter frame or a newly pushed C2I adapter or an entry frame.
  2652   // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
  2653   // outgoing arguments.
  2654   //
  2655   // In order to push the interpreter frame for the deoptee, we need to
  2656   // resize the top frame such that we are able to place the deoptee's
  2657   // locals in the frame.
  2658   // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
  2659   // into a valid PARENT_IJAVA_FRAME_ABI.
  2661   __ lwa(R11_scratch1,
  2662              Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
  2663              unroll_block_reg);
  2664   __ neg(R11_scratch1, R11_scratch1);
  2666   // R11_scratch1 contains size of locals for frame resizing.
  2667   // R12_scratch2 contains top frame's lr.
  2669   // Resize frame by complete frame size prevents TOC from being
  2670   // overwritten by locals. A more stack space saving way would be
  2671   // to copy the TOC to its location in the new abi.
  2672   __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
  2674   // Now, resize the frame.
  2675   __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
  2677   // In the case where we have resized a c2i frame above, the optional
  2678   // alignment below the locals has size 32 (why?).
  2679   __ std(R12_scratch2, _abi(lr), R1_SP);
  2681   // Initialize initial_caller_sp.
  2682   __ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
  2684 #ifdef ASSERT
  2685   // Make sure that there is at least one entry in the array.
  2686   __ cmpdi(CCR0, number_of_frames_reg, 0);
  2687   __ asm_assert_ne("array_size must be > 0", 0x205);
  2688 #endif
  2690   // Now push the new interpreter frames.
  2691   //
  2692   __ bind(loop);
  2693   // Allocate a new frame, fill in the pc.
  2694   push_skeleton_frame(masm, deopt,
  2695                       unroll_block_reg,
  2696                       frame_sizes_reg,
  2697                       number_of_frames_reg,
  2698                       pcs_reg,
  2699                       frame_size_reg,
  2700                       pc_reg);
  2701   __ cmpdi(CCR0, number_of_frames_reg, 0);
  2702   __ bne(CCR0, loop);
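         // In C-like terms, the loop above amounts to (sketch only;
         // push_skeleton_frame also decrements number_of_frames_reg):
         //
         //   do {
         //     push_skeleton_frame(...);
         //   } while (number_of_frames_reg != 0);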
  2704   // Get the return address pointing into the frame manager.
  2705   __ ld(R0, 0, pcs_reg);
  2706   // Store it in the top interpreter frame.
  2707   __ std(R0, _abi(lr), R1_SP);
  2708   // Initialize frame_manager_lr of interpreter top frame.
  2709 #ifdef CC_INTERP
  2710   __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  2711 #endif
  2712 }
  2713 #endif
  2715 void SharedRuntime::generate_deopt_blob() {
  2716   // Allocate space for the code
  2717   ResourceMark rm;
  2718   // Setup code generation tools
  2719   CodeBuffer buffer("deopt_blob", 2048, 1024);
  2720   InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  2721   Label exec_mode_initialized;
  2722   int frame_size_in_words;
  2723   OopMap* map = NULL;
  2724   OopMapSet *oop_maps = new OopMapSet();
  2726   // Size of ABI112 plus spill slots for R3_RET and F1_RET.
  2727   const int frame_size_in_bytes = frame::abi_112_spill_size;
  2728   const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  2729   int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
  2731   const Register exec_mode_reg = R21_tmp1;
  2733   const address start = __ pc();
  2735 #ifdef COMPILER2
  2736   // --------------------------------------------------------------------------
  2737   // Prolog for the non-exception case!
  2739   // We have been called from the deopt handler of the deoptee.
  2740   //
  2741   // deoptee:
  2742   //                      ...
  2743   //                      call X
  2744   //                      ...
  2745   //  deopt_handler:      call_deopt_stub
  2746   //  cur. return pc  --> ...
  2747   //
  2748   // So currently SR_LR points behind the call in the deopt handler.
  2749   // We adjust it such that it points to the start of the deopt handler.
  2750   // The return_pc has been stored in the frame of the deoptee and
  2751   // will replace the address of the deopt_handler in the call
  2752   // to Deoptimization::fetch_unroll_info below.
  2753   // We can't grab a free register here, because all registers may
  2754   // contain live values, so let the RegisterSaver do the adjustment
  2755   // of the return pc.
  2756   const int return_pc_adjustment_no_exception = -size_deopt_handler();
  2758   // Push the "unpack frame"
  2759   // Save everything in sight.
  2760   map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
  2761                                                                  &first_frame_size_in_bytes,
  2762                                                                  /*generate_oop_map=*/ true,
  2763                                                                  return_pc_adjustment_no_exception,
  2764                                                                  RegisterSaver::return_pc_is_lr);
  2765   assert(map != NULL, "OopMap must have been created");
  2767   __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
  2768   // Save exec mode for unpack_frames.
  2769   __ b(exec_mode_initialized);
  2771   // --------------------------------------------------------------------------
  2772   // Prolog for exception case
  2774   // An exception is pending.
  2775   // We have been called with a return (interpreter) or a jump (exception blob).
  2776   //
  2777   // - R3_ARG1: exception oop
  2778   // - R4_ARG2: exception pc
  2780   int exception_offset = __ pc() - start;
  2782   BLOCK_COMMENT("Prolog for exception case");
  2784   // The RegisterSaver doesn't need to adjust the return pc for this situation.
  2785   const int return_pc_adjustment_exception = 0;
  2787   // Push the "unpack frame".
  2788   // Save everything in sight.
  2789   assert(R4 == R4_ARG2, "exception pc must be in r4");
  2790   RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
  2791                                                            &first_frame_size_in_bytes,
  2792                                                            /*generate_oop_map=*/ false,
  2793                                                            return_pc_adjustment_exception,
  2794                                                            RegisterSaver::return_pc_is_r4);
  2796   // Deopt during an exception. Save exec mode for unpack_frames.
  2797   __ li(exec_mode_reg, Deoptimization::Unpack_exception);
  2799   // Store exception oop and pc in thread (location known to GC).
  2800   // This is needed since the call to "fetch_unroll_info()" may safepoint.
  2801   __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  2802   __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);
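         // For illustration: at the C++ level this corresponds to the
         // JavaThread setters
         //
         //   thread->set_exception_oop(exception_oop);
         //   thread->set_exception_pc(exception_pc);
         //
         // so a GC at the safepoint below can find (and update) the oop.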
  2804   // fall through
  2806   // --------------------------------------------------------------------------
  2807   __ BIND(exec_mode_initialized);
  2810   const Register unroll_block_reg = R22_tmp2;
  2812   // We need to set `last_Java_frame' because `fetch_unroll_info' will
  2813   // call `last_Java_frame()'. The value of the pc in the frame is not
  2814   // particularly important. It just needs to identify this blob.
  2815   __ set_last_Java_frame(R1_SP, noreg);
  2817   // With EscapeAnalysis turned on, this call may safepoint!
  2818   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
  2819   address calls_return_pc = __ last_calls_return_pc();
  2820   // Set an oopmap for the call site that describes all our saved registers.
  2821   oop_maps->add_gc_map(calls_return_pc - start, map);
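         // For reference, the runtime entry called above is declared in
         // deoptimization.hpp roughly as
         //
         //   static UnrollBlock* fetch_unroll_info(JavaThread* thread);
         //
         // and its UnrollBlock* result comes back in R3_RET.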
  2823   __ reset_last_Java_frame();
  2824   // Save the return value.
  2825   __ mr(unroll_block_reg, R3_RET);
  2827   // Restore only the result registers that have been saved
  2828   // by push_frame_abi112_and_save_live_registers(...).
  2829   RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
  2831   // In excp_deopt_mode, restore and clear exception oop which we
  2832   // stored in the thread during exception entry above. The exception
  2833   // oop will be the return value of this stub.
  2834   Label skip_restore_excp;
  2835   __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
  2836   __ bne(CCR0, skip_restore_excp);
  2837   __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  2838   __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
  2839   __ li(R0, 0);
  2840   __ std(R0, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);
  2841   __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  2842   __ BIND(skip_restore_excp);
  2844   // Reload narrow_oop_base.
  2845   if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
  2846     __ load_const_optimized(R30, Universe::narrow_oop_base());
  2847   }
  2849   __ pop_frame();
  2851   // stack: (deoptee, optional i2c, caller of deoptee, ...).
  2853   // pop the deoptee's frame
  2854   __ pop_frame();
  2856   // stack: (caller_of_deoptee, ...).
  2858   // Loop through the `UnrollBlock' info and create interpreter frames.
  2859   push_skeleton_frames(masm, true/*deopt*/,
  2860                        unroll_block_reg,
  2861                        R23_tmp3,
  2862                        R24_tmp4,
  2863                        R25_tmp5,
  2864                        R26_tmp6,
  2865                        R27_tmp7);
  2867   // stack: (skeletal interpreter frame, ..., optional skeletal
  2868   // interpreter frame, optional c2i, caller of deoptee, ...).
  2871   // push an `unpack_frame' taking care of float / int return values.
  2872   __ push_frame(frame_size_in_bytes, R0/*tmp*/);
  2874   // stack: (unpack frame, skeletal interpreter frame, ..., optional
  2875   // skeletal interpreter frame, optional c2i, caller of deoptee,
  2876   // ...).
  2878   // Spill live volatile registers since we'll do a call.
  2879   __ std( R3_RET,  _abi_112_spill(spill_ret),  R1_SP);
  2880   __ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
  2882   // Let the unpacker lay out the information in the skeletal frames
  2883   // just allocated.
  2884   __ get_PC_trash_LR(R3_RET);
  2885   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
  2886   // This is a call to a LEAF method, so no oop map is required.
  2887   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
  2888                   R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
  2889   __ reset_last_Java_frame();
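         // For reference, the entry just called is declared in
         // deoptimization.hpp roughly as
         //
         //   static BasicType unpack_frames(JavaThread* thread, int exec_mode);
         //
         // with exec_mode one of Unpack_deopt, Unpack_exception,
         // Unpack_uncommon_trap or Unpack_reexecute.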
  2891   // Restore the volatiles saved above.
  2892   __ ld( R3_RET, _abi_112_spill(spill_ret),  R1_SP);
  2893   __ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
  2895   // Pop the unpack frame.
  2896   __ pop_frame();
  2897   __ restore_LR_CR(R0);
  2899   // stack: (top interpreter frame, ..., optional interpreter frame,
  2900   // optional c2i, caller of deoptee, ...).
  2902   // Initialize R14_state.
  2903   __ ld(R14_state, 0, R1_SP);
  2904   __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  2905   // Also initialize R15_prev_state.
  2906   __ restore_prev_state();
  2908   // Return to the interpreter entry point.
  2909   __ blr();
  2910   __ flush();
  2911 #else // COMPILER2
  2912   __ unimplemented("deopt blob needed only with compiler");
  2913   int exception_offset = __ pc() - start;
  2914 #endif // COMPILER2
  2916   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize);
  2917 }
  2919 #ifdef COMPILER2
  2920 void SharedRuntime::generate_uncommon_trap_blob() {
  2921   // Allocate space for the code.
  2922   ResourceMark rm;
  2923   // Setup code generation tools.
  2924   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  2925   InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  2926   address start = __ pc();
  2928   Register unroll_block_reg = R21_tmp1;
  2929   Register klass_index_reg  = R22_tmp2;
  2930   Register unc_trap_reg     = R23_tmp3;
  2932   OopMapSet* oop_maps = new OopMapSet();
  2933   int frame_size_in_bytes = frame::abi_112_size;
  2934   OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
  2936   // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
  2938   // Push a dummy `unpack_frame' and call
  2939   // `Deoptimization::uncommon_trap' to pack the compiled frame into a
  2940   // vframe array and return the `UnrollBlock' information.
  2942   // Save LR to compiled frame.
  2943   __ save_LR_CR(R11_scratch1);
  2945   // Push an "uncommon_trap" frame.
  2946   __ push_frame_abi112(0, R11_scratch1);
  2948   // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
  2950   // Set the `unpack_frame' as last_Java_frame.
  2951   // `Deoptimization::uncommon_trap' expects it and considers its
  2952   // sender frame as the deoptee frame.
  2953   // Remember the offset of the instruction whose address will be
  2954   // moved to R11_scratch1.
  2955   address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
  2957   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
  2959   __ mr(klass_index_reg, R3);
  2960   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
  2961                   R16_thread, klass_index_reg);
  2963   // Set an oopmap for the call site.
  2964   oop_maps->add_gc_map(gc_map_pc - start, map);
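         // For reference, the entry called above is declared in
         // deoptimization.hpp roughly as
         //
         //   static UnrollBlock* uncommon_trap(JavaThread* thread,
         //                                     jint unloaded_class_index);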
  2966   __ reset_last_Java_frame();
  2968   // Pop the `unpack frame'.
  2969   __ pop_frame();
  2971   // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
  2973   // Save the return value.
  2974   __ mr(unroll_block_reg, R3_RET);
  2976   // Pop the uncommon_trap frame.
  2977   __ pop_frame();
  2979   // stack: (caller_of_deoptee, ...).
  2981   // Allocate new interpreter frame(s) and possibly a c2i adapter
  2982   // frame.
  2983   push_skeleton_frames(masm, false/*deopt*/,
  2984                        unroll_block_reg,
  2985                        R22_tmp2,
  2986                        R23_tmp3,
  2987                        R24_tmp4,
  2988                        R25_tmp5,
  2989                        R26_tmp6);
  2991   // stack: (skeletal interpreter frame, ..., optional skeletal
  2992   // interpreter frame, optional c2i, caller of deoptee, ...).
  2994   // Push a dummy `unpack_frame' taking care of float return values.
  2995   // Call `Deoptimization::unpack_frames' to lay out the information
  2996   // in the interpreter frames just created.
  2998   // Push a simple "unpack frame" here.
  2999   __ push_frame_abi112(0, R11_scratch1);
  3001   // stack: (unpack frame, skeletal interpreter frame, ..., optional
  3002   // skeletal interpreter frame, optional c2i, caller of deoptee,
  3003   // ...).
  3005   // Set the "unpack_frame" as last_Java_frame.
  3006   __ get_PC_trash_LR(R11_scratch1);
  3007   __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
  3009   // Indicate it is the uncommon trap case.
  3010   __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
  3011   // Let the unpacker lay out the information in the skeletal frames
  3012   // just allocated.
  3013   __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
  3014                   R16_thread, unc_trap_reg);
  3016   __ reset_last_Java_frame();
  3017   // Pop the `unpack frame'.
  3018   __ pop_frame();
  3019   // Restore LR from top interpreter frame.
  3020   __ restore_LR_CR(R11_scratch1);
  3022   // stack: (top interpreter frame, ..., optional interpreter frame,
  3023   // optional c2i, caller of deoptee, ...).
  3025   // Initialize R14_state, ...
  3026   __ ld(R11_scratch1, 0, R1_SP);
  3027   __ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  3028   // also initialize R15_prev_state.
  3029   __ restore_prev_state();
  3030   // Return to the interpreter entry point.
  3031   __ blr();
  3033   masm->flush();
  3035   _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
  3036 }
  3037 #endif // COMPILER2
  3039 // Generate a special Compile2Runtime blob that saves all registers and sets up the oopmap.
  3040 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  3041   assert(StubRoutines::forward_exception_entry() != NULL,
  3042          "must be generated before");
  3044   ResourceMark rm;
  3045   OopMapSet *oop_maps = new OopMapSet();
  3046   OopMap* map;
  3048   // Allocate space for the code. Setup code generation tools.
  3049   CodeBuffer buffer("handler_blob", 2048, 1024);
  3050   MacroAssembler* masm = new MacroAssembler(&buffer);
  3052   address start = __ pc();
  3053   int frame_size_in_bytes = 0;
  3055   RegisterSaver::ReturnPCLocation return_pc_location;
  3056   bool cause_return = (poll_type == POLL_AT_RETURN);
  3057   if (cause_return) {
  3058     // Nothing to do here. The frame has already been popped in MachEpilogNode.
  3059     // Register LR already contains the return pc.
  3060     return_pc_location = RegisterSaver::return_pc_is_lr;
  3061   } else {
  3062     // Use thread()->saved_exception_pc() as return pc.
  3063     return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
  3064   }
  3066   // Save registers, fpu state, and flags.
  3067   map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
  3068                                                                  &frame_size_in_bytes,
  3069                                                                  /*generate_oop_map=*/ true,
  3070                                                                  /*return_pc_adjustment=*/0,
  3071                                                                  return_pc_location);
  3073   // The following is basically a call_VM. However, we need the precise
  3074   // address of the call in order to generate an oopmap. Hence, we do all the
  3075   // work ourselves.
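         // A minimal sketch of that hand-rolled call_VM pattern (the names
         // are the ones used below):
         //
         //   __ set_last_Java_frame(R1_SP, noreg);
         //   __ call_VM_leaf(call_ptr, R16_thread);
         //   oop_maps->add_gc_map(__ last_calls_return_pc() - start, map);
         //   __ reset_last_Java_frame();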
  3076   __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
  3078   // The return address must always be correct so that the frame constructor
  3079   // never sees an invalid pc.
  3081   // Do the call
  3082   __ call_VM_leaf(call_ptr, R16_thread);
  3083   address calls_return_pc = __ last_calls_return_pc();
  3085   // Set an oopmap for the call site. This oopmap will map all
  3086   // oop-registers and debug-info registers as callee-saved. This
  3087   // will allow deoptimization at this safepoint to find all possible
  3088   // debug-info recordings, as well as let GC find all oops.
  3089   oop_maps->add_gc_map(calls_return_pc - start, map);
  3091   Label noException;
  3093   // Clear the last Java frame.
  3094   __ reset_last_Java_frame();
  3096   BLOCK_COMMENT("  Check pending exception.");
  3097   const Register pending_exception = R0;
  3098   __ ld(pending_exception, thread_(pending_exception));
  3099   __ cmpdi(CCR0, pending_exception, 0);
  3100   __ beq(CCR0, noException);
  3102   // Exception pending
  3103   RegisterSaver::restore_live_registers_and_pop_frame(masm,
  3104                                                       frame_size_in_bytes,
  3105                                                       /*restore_ctr=*/true);
  3108   BLOCK_COMMENT("  Jump to forward_exception_entry.");
  3109   // Jump to forward_exception_entry, with the issuing PC in LR
  3110   // so it looks like the original nmethod called forward_exception_entry.
  3111   __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  3113   // No exception case.
  3114   __ BIND(noException);
  3117   // Normal exit, restore registers and exit.
  3118   RegisterSaver::restore_live_registers_and_pop_frame(masm,
  3119                                                       frame_size_in_bytes,
  3120                                                       /*restore_ctr=*/true);
  3122   __ blr();
  3124   // Make sure all code is generated
  3125   masm->flush();
  3127   // Fill out other meta info.
  3128   // CodeBlob frame size is in words.
  3129   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
  3130 }
  3132 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
  3133 //
  3134 // Generate a stub that calls into the VM to find out the proper destination
  3135 // of a Java call. All the argument registers are live at this point,
  3136 // but since this is generic code we don't know what they are, and the
  3137 // caller must do any GC of the args.
  3138 //
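       // For illustration, typical destinations passed in by the shared
       // runtime when it sets up these blobs (see sharedRuntime.cpp):
       //
       //   SharedRuntime::resolve_static_call_C
       //   SharedRuntime::resolve_virtual_call_C
       //   SharedRuntime::resolve_opt_virtual_call_C
       //   SharedRuntime::handle_wrong_method_ic_miss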
  3139 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  3141   // allocate space for the code
  3142   ResourceMark rm;
  3144   CodeBuffer buffer(name, 1000, 512);
  3145   MacroAssembler* masm = new MacroAssembler(&buffer);
  3147   int frame_size_in_bytes;
  3149   OopMapSet *oop_maps = new OopMapSet();
  3150   OopMap* map = NULL;
  3152   address start = __ pc();
  3154   map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
  3155                                                                  &frame_size_in_bytes,
  3156                                                                  /*generate_oop_map*/ true,
  3157                                                                  /*return_pc_adjustment*/ 0,
  3158                                                                  RegisterSaver::return_pc_is_lr);
  3160   // Use noreg as last_Java_pc; the return pc will be reconstructed
  3161   // from the physical frame.
  3162   __ set_last_Java_frame(/*sp*/R1_SP, noreg);
  3164   int frame_complete = __ offset();
  3166   // Pass R19_method as 2nd (optional) argument, used by
  3167   // counter_overflow_stub.
  3168   __ call_VM_leaf(destination, R16_thread, R19_method);
  3169   address calls_return_pc = __ last_calls_return_pc();
  3170   // Set an oopmap for the call site.
  3171   // We need this not only for callee-saved registers, but also for volatile
  3172   // registers that the compiler might be keeping live across a safepoint.
  3173   // Create the oopmap for the call's return pc.
  3174   oop_maps->add_gc_map(calls_return_pc - start, map);
  3176   // R3_RET contains the address we are going to jump to, assuming no exception was installed.
  3178   // clear last_Java_sp
  3179   __ reset_last_Java_frame();
  3181   // Check for pending exceptions.
  3182   BLOCK_COMMENT("Check for pending exceptions.");
  3183   Label pending;
  3184   __ ld(R11_scratch1, thread_(pending_exception));
  3185   __ cmpdi(CCR0, R11_scratch1, 0);
  3186   __ bne(CCR0, pending);
  3188   __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
  3190   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
  3192   // Get the returned method.
  3193   __ get_vm_result_2(R19_method);
  3195   __ bctr();
  3198   // Pending exception after the safepoint.
  3199   __ BIND(pending);
  3201   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
  3203   // exception pending => remove activation and forward to exception handler
  3205   __ li(R11_scratch1, 0);
  3206   __ ld(R3_ARG1, thread_(pending_exception));
  3207   __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  3208   __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  3210   // -------------
  3211   // Make sure all code is generated.
  3212   masm->flush();
  3214   // Return the blob.
  3215   // Note: the frame size is passed to the blob in words.
  3216   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
  3217                                        oop_maps, true);
  3218 }
