src/cpu/sparc/vm/methodHandles_sparc.cpp

changeset 2203:c393f046f4c5
parent 2201:d55217dc206f
child 2204:5beba6174298
author: iveresov
date: Tue, 12 Oct 2010 23:51:20 -0700

6991512: G1 barriers fail with 64bit C1
Summary: Fix compare-and-swap intrinsic problem with G1 post-barriers and issue with branch ranges in G1 stubs on sparc
Reviewed-by: never, kvn
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_sparc.cpp.incl"

#define __ _masm->
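
// The "__" shorthand is the usual HotSpot assembler idiom: every
// "__ foo(...)" below expands to "_masm->foo(...)", so each generator
// emits its SPARC code through the MacroAssembler it was handed.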

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
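
// Usage sketch (mirroring the call sites at the bottom of
// generate_method_handle_stub below): the two helpers bracket the
// generation of a stub, roughly
//
//   address cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
//   ... emit stub body ...
//   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, cookie));
//
// start_compiled_entry reserves the Data record just ahead of the code,
// and finish_compiled_entry patches in the real end_address afterwards.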

// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
  __ ldsw(         Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}
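
// A note on the __ delayed_value(...) loads above: the java_dyn_* field
// offsets cannot be computed at the time this entry is generated (the
// java.dyn classes need not be loaded yet), so delayed_value emits a load
// from a patchable cell that the VM fills in later, using O1_scratch as a
// temp. (This describes MacroAssembler::delayed_value as we read it, not
// a guarantee from the original comments.)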

#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
#endif
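
// The check above pins every argument slot address between Gargs (the
// incoming argument area) and FP, re-biased by STACK_BIAS (0x7ff) on
// 64-bit SPARC, where frame pointers are stored biased. The helper is
// compiled only into ASSERT builds.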

// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}
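
// Worked example (illustrative numbers, not from the original source):
// inserting two slots on LP64 means arg_slots == -2, so
//   offset        = -2 << LogBytesPerWord = -16 bytes
//   masked_offset = andn(-16, 15)         = -16  (already 2*wordSize aligned)
// SP and Gargs both drop by 16, the loop slides each word of
// [old Gargs, argslot) down by 16, and argslot_reg ends up addressing the
// newly opened pair of slots.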

// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}
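
// Mirror of the insertion example above: removing two slots on LP64 means
// arg_slots == +2 and offset == +16; the loop walks from argslot-wordSize
// down to Gargs copying each word up by 16 bytes, then Gargs, argslot_reg
// and SP (by the aligned masked_offset) all rise by 16.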

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh) {
#if 0
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif

  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
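
// The returned value is a bitset indexed by OP_* code: bit n is set iff
// adapter op n has a stub in this file. A caller would test membership
// with something like ((mask >> op) & 1) != 0 -- an illustrative check,
// not a quote of the actual call site.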

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  Register O0_argslot = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register G5_index   = G5;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

#ifndef PRODUCT
  if (TraceMethodHandles) {
    // save: Gargs, O5_savedSP
    __ save(SP, -16*wordSize, SP);
    __ set((intptr_t) entry_name(ek), O0);
    __ mov(G3_method_handle, O1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
    __ restore(SP, 16*wordSize, SP);
  }
#endif // PRODUCT

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Extra local arguments are passed in scratch
      // registers, as required type in O3, failing object (or NULL)
      // in O2, failing bytecode type in O1.

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      // Push arguments as if coming from the interpreter.
      Register O0_scratch = O0_argslot;
      int stackElementSize = Interpreter::stackElementSize;

      // Make space on the stack for the arguments and set Gargs
      // correctly.
      __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
      __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);

      // void raiseException(int code, Object actual, Object required)
      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required

      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ unimplemented("_raise_exception no method");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf  = O1_scratch;
      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
      __ mov(O0_klass, O2_scratch);  // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ load_heap_oop(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        if (arg_slots == 2) {
          __ unimplemented("not yet tested");
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
        } else {
          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
        }
      }

      if (direct_to_method) {
        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
      __ load_heap_oop(G3_amh_argument, O3_scratch);  // required class
      __ ld_ptr(vmarg, O2_scratch);  // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?

      __ bind(done);
      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);
      Address value;
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
      case _adapter_opt_l2i:
        __ unimplemented(entry_name(ek));
        value = vmarg;
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
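
      // How the shifts below work (an inference from the emitted code,
      // not original commentary): vminfo is a shift count, e.g. 24 for
      // int->byte, so sll(24) followed by sra(24) sign-extends the low
      // 8 bits, while the srl path zero-extends for int->char. SPARC
      // 32-bit shifts use only the low 5 bits of the count register,
      // which is presumably why G5_vminfo needs no CONV_VMINFO_MASK here.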
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;
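
  // Layout note for the case above (derived from the code, not original
  // commentary): insert_arg_slots opens one extra slot, arg_msw names the
  // lower-addressed slot, and the single st_long covers both words, so on
  // big-endian SPARC the widened value lands with its MSW in the first
  // slot and its LSW in the original one.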

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;
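
  // Concrete reading of the swap/rotate case above (an editor's sketch):
  // for a plain swap the for-loop exchanges the two operands word by word;
  // for the rotate cases the first chunk is saved in O2/O3, the shuffle
  // loop slides the intervening words over it, and the saved chunk is
  // finally deposited at destslot.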

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
