src/cpu/mips/vm/templateTable_mips_64.cpp

author:      aoqi
date:        Fri, 29 Apr 2016 00:06:10 +0800
changeset:   1:2d8a650513c2
child:       2:26621fe12c48
permissions: -rw-r--r--

Added MIPS 64-bit port.
     1 /*
     2  * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
     4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5  *
     6  * This code is free software; you can redistribute it and/or modify it
     7  * under the terms of the GNU General Public License version 2 only, as
     8  * published by the Free Software Foundation.
     9  *
    10  * This code is distributed in the hope that it will be useful, but WITHOUT
    11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    13  * version 2 for more details (a copy is included in the LICENSE file that
    14  * accompanied this code).
    15  *
    16  * You should have received a copy of the GNU General Public License version
    17  * 2 along with this work; if not, write to the Free Software Foundation,
    18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    19  *
    20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    21  * or visit www.oracle.com if you need additional information or have any
    22  * questions.
    23  *
    24  */
    26 #include "precompiled.hpp"
    27 #include "asm/macroAssembler.hpp"
    28 #include "interpreter/interpreter.hpp"
    29 #include "interpreter/interpreterRuntime.hpp"
    30 #include "interpreter/templateTable.hpp"
    31 #include "memory/universe.inline.hpp"
    32 #include "oops/methodData.hpp"
    33 #include "oops/objArrayKlass.hpp"
    34 #include "oops/oop.inline.hpp"
    35 #include "prims/methodHandles.hpp"
    36 #include "runtime/sharedRuntime.hpp"
    37 #include "runtime/stubRoutines.hpp"
    38 #include "runtime/synchronizer.hpp"
    41 #ifndef CC_INTERP
    43 #define __ _masm->
    45 // Platform-dependent initialization
    47 void TemplateTable::pd_initialize() {
    48   // No mips specific initialization
    49 }
    51 // Address computation: local variables
    52 // we use t8 as the local variables pointer register, by yjl 6/27/2005
    53 static inline Address iaddress(int n) {
    54   return Address(LVP, Interpreter::local_offset_in_bytes(n));
    55 }
    57 static inline Address laddress(int n) {
    58   return iaddress(n + 1);
    59 }
    61 static inline Address faddress(int n) {
    62   return iaddress(n);
    63 }
    65 static inline Address daddress(int n) {
    66   return laddress(n);
    67 }
    69 static inline Address aaddress(int n) {
    70   return iaddress(n);
    71 }
    72 static inline Address haddress(int n)            { return iaddress(n + 0); }
     74 //FIXME: cannot use dadd and dsll
    75 /*
    76 static inline Address iaddress(Register r) {
    77   return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
    78 }
    80 static inline Address laddress(Register r) {
    81   return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
    82 }
    84 static inline Address faddress(Register r) {
    85   return iaddress(r);
    86 }
    88 static inline Address daddress(Register r) {
    89   return laddress(r);
    90 }
    92 static inline Address aaddress(Register r) {
    93   return iaddress(r);
    94 }
    95 */
     97 static inline Address at_sp()             { return Address(SP,  0); }
    98 static inline Address at_sp_p1()          { return Address(SP,  1 * wordSize); }
    99 static inline Address at_sp_p2()          { return Address(SP,  2 * wordSize); }
    101 // At top of Java expression stack which may be different from SP.  It
    102 // isn't for category 1 objects.
   103 static inline Address at_tos   () {
   104   Address tos = Address(SP,  Interpreter::expr_offset_in_bytes(0));
   105   return tos;
   106 }
   108 static inline Address at_tos_p1() {
   109   return Address(SP,  Interpreter::expr_offset_in_bytes(1));
   110 }
   112 static inline Address at_tos_p2() {
   113   return Address(SP,  Interpreter::expr_offset_in_bytes(2));
   114 }
   116 static inline Address at_tos_p3() {
   117   return Address(SP,  Interpreter::expr_offset_in_bytes(3));
   118 }
    120 // we use S0 as bcp; make sure bcp is in S0 before calling any of the template generators
   121 Address TemplateTable::at_bcp(int offset) {
   122   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
   123   return Address(BCP, offset);
   124 }
   126 #define callee_saved_register(R) assert((R>=S0 && R<=S7), "should use callee saved registers!")
   128 // bytecode folding
   129 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
   130                                    Register tmp_reg, 
   131                                    bool load_bc_into_bc_reg,/*=true*/
   132                                    int byte_no) {
   133   if (!RewriteBytecodes) {
   134     return;
   135   }
   137   Label L_patch_done;
   138   switch (bc) {
   139   case Bytecodes::_fast_aputfield:
   140   case Bytecodes::_fast_bputfield:
   141   case Bytecodes::_fast_cputfield:
   142   case Bytecodes::_fast_dputfield:
   143   case Bytecodes::_fast_fputfield:
   144   case Bytecodes::_fast_iputfield:
   145   case Bytecodes::_fast_lputfield:
   146   case Bytecodes::_fast_sputfield:
   147     {
   148     // We skip bytecode quickening for putfield instructions when the put_code written to the constant pool cache
   149     // is zero. This is required so that every execution of this instruction calls out to 
   150     // InterpreterRuntime::resolve_get_put to do additional, required work.
   151     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   152     assert(load_bc_into_bc_reg, "we use bc_reg as temp");
   153     __ get_cache_and_index_and_bytecode_at_bcp(tmp_reg, bc_reg, tmp_reg, byte_no, 1);
   154     __ daddi(bc_reg, R0, bc);
   155     __ beq(tmp_reg, R0, L_patch_done);
   156     __ delayed()->nop();
   157     }
   158     break;
   159   default:
   160     assert(byte_no == -1, "sanity");
   161  // the pair bytecodes have already done the load.
   162   if (load_bc_into_bc_reg) {
   163     __ move(bc_reg, bc);
   164   }
   166   }
   167   if (JvmtiExport::can_post_breakpoint()) {
   168     Label L_fast_patch;
   169     // if a breakpoint is present we can't rewrite the stream directly
   170     __ lbu(tmp_reg, at_bcp(0));
   171     __ move(AT, Bytecodes::_breakpoint);
   172     __ bne(tmp_reg, AT, L_fast_patch);
   173     __ delayed()->nop();
   175     __ get_method(tmp_reg);
   176     // Let breakpoint table handling rewrite to quicker bytecode 
   177     __ call_VM(NOREG, CAST_FROM_FN_PTR(address, 
   178 	  InterpreterRuntime::set_original_bytecode_at), tmp_reg, BCP, bc_reg);
   180     __ b(L_patch_done);
   181     __ delayed()->nop();
   182     __ bind(L_fast_patch);
   183   }
   185 #ifdef ASSERT
   186   Label L_okay;
   187   __ lbu(tmp_reg, at_bcp(0));
   188   __ move(AT, (int)Bytecodes::java_code(bc));
   189   __ beq(tmp_reg, AT, L_okay);
   190   __ delayed()->nop();
   191   __ beq(tmp_reg, bc_reg, L_patch_done);
   192   __ delayed()->nop();
   193   __ stop("patching the wrong bytecode");
   194   __ bind(L_okay);
   195 #endif
   197   // patch bytecode
   198   __ sb(bc_reg, at_bcp(0));
   199   __ bind(L_patch_done);
   200 }
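For orientation, patch_bytecode amounts to overwriting the opcode byte at the current bcp with a quickened variant once resolution has happened, unless a breakpoint opcode owns that slot. A minimal sketch of that idea in plain C++ (the opcode names and values here are illustrative stand-ins, not HotSpot's Bytecodes constants):

    #include <cstdint>
    #include <cstdio>

    enum FakeBc : uint8_t { BC_GETFIELD = 0xb4, BC_FAST_IGETFIELD = 0xcc, BC_BREAKPOINT = 0xca };

    // Overwrite the opcode so later executions take the fast path.
    static void quicken(uint8_t* code, int bci, uint8_t fast_bc) {
      if (code[bci] == BC_BREAKPOINT) return;   // breakpoint present: let the VM rewrite via the breakpoint table
      code[bci] = fast_bc;
    }

    int main() {
      uint8_t stream[] = { BC_GETFIELD, 0x00, 0x02 };
      quicken(stream, 0, BC_FAST_IGETFIELD);
      std::printf("opcode is now 0x%x\n", stream[0]);
      return 0;
    }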
   203 // Individual instructions
   205 void TemplateTable::nop() {
   206   transition(vtos, vtos);
   207   // nothing to do
   208 }
   210 void TemplateTable::shouldnotreachhere() {
   211   transition(vtos, vtos);
   212   __ stop("shouldnotreachhere bytecode");
   213 }
   215 void TemplateTable::aconst_null() {
   216   transition(vtos, atos);
   217   __ move(FSR, R0);
   218 }
   220 void TemplateTable::iconst(int value) {
   221   transition(vtos, itos);
   222   if (value == 0) {
   223     __ move(FSR, R0);
   224   } else {
   225     __ move(FSR, value);
   226   }
   227 }
   229 void TemplateTable::lconst(int value) {
   230   transition(vtos, ltos);
   231   if (value == 0) {
   232     __ move(FSR, R0);
   233   } else {
   234     __ move(FSR, value);
   235   }
   236   assert(value >= 0, "check this code");
   237   //__ move(SSR, R0);
   238 }
   240 void TemplateTable::fconst(int value) {
   241   static float  _f1 = 1.0, _f2 = 2.0;
   242   transition(vtos, ftos);
   243   float* p;
   244   switch( value ) {
   245     default: ShouldNotReachHere();
   246     case 0:  __ dmtc1(R0, FSF);  return;
   247     case 1:  p = &_f1;   break;
   248     case 2:  p = &_f2;   break;
   249   }
   250   __ li(AT, (address)p);
   251   __ lwc1(FSF, AT, 0);
   252 }
   254 void TemplateTable::dconst(int value) {
   255   static double _d1 = 1.0;
   256   transition(vtos, dtos);
   257   double* p;
   258   switch( value ) {
   259     default: ShouldNotReachHere();
   260     case 0:  __ dmtc1(R0, FSF);  return;
   261     case 1:  p = &_d1;   break;
   262   }
   263   __ li(AT, (address)p);
   264   __ ldc1(FSF, AT, 0);
   265 }
   267 void TemplateTable::bipush() {
   268   transition(vtos, itos);
   269   __ lb(FSR, at_bcp(1));
   270 }
   272 void TemplateTable::sipush() {
   273 	transition(vtos, itos);
   274 	__ load_two_bytes_from_at_bcp(FSR, AT, 1);
   275 	__ hswap(FSR);
   276 }
   278 // T1 : tags
   279 // T2 : index
   280 // T3 : cpool
   281 // T8 : tag
   282 void TemplateTable::ldc(bool wide) {
   283   transition(vtos, vtos);
   284   Label call_ldc, notFloat, notClass, Done;
   285   // get index in cpool
   286   if (wide) {
   287     __ load_two_bytes_from_at_bcp(T2, AT, 1);
   288     __ huswap(T2);
   289   } else {
   290     __ lbu(T2, at_bcp(1));
   291   }
   293   __ get_cpool_and_tags(T3, T1);
   295   const int base_offset = ConstantPool::header_size() * wordSize;
   296   const int tags_offset = Array<u1>::base_offset_in_bytes();
   298   // get type
   299   __ dadd(AT, T1, T2);
   300   __ lb(T1, AT, tags_offset);
   301   //now T1 is the tag
   303   // unresolved string - get the resolved string
   304   /*__ daddiu(AT, T1, - JVM_CONSTANT_UnresolvedString);
   305   __ beq(AT, R0, call_ldc);
   306   __ delayed()->nop();*/
   308   // unresolved class - get the resolved class
   309   __ daddiu(AT, T1, - JVM_CONSTANT_UnresolvedClass);
   310   __ beq(AT, R0, call_ldc);
   311   __ delayed()->nop();
   313   // unresolved class in error (resolution failed) - call into runtime
   314   // so that the same error from first resolution attempt is thrown.
   315   __ daddiu(AT, T1, -JVM_CONSTANT_UnresolvedClassInError); 
   316   __ beq(AT, R0, call_ldc);
   317   __ delayed()->nop();
   319   // resolved class - need to call vm to get java mirror of the class
   320   __ daddiu(AT, T1, - JVM_CONSTANT_Class);
   321   __ bne(AT, R0, notClass);
   322   __ delayed()->dsll(T2, T2, Address::times_8);
   324   __ bind(call_ldc);
   326   __ move(A1, wide);
   327   call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), A1);
   328   //	__ sw(FSR, SP, - 1 * wordSize);
   329   __ push(atos);	
   330   __ b(Done);
   331   //	__ delayed()->daddi(SP, SP, - 1 * wordSize);
   332   __ delayed()->nop();
   333   __ bind(notClass);
   335   __ daddiu(AT, T1, -JVM_CONSTANT_Float);
   336   __ bne(AT, R0, notFloat);
   337   __ delayed()->nop();
   338   // ftos
   339   __ dadd(AT, T3, T2);
   340   __ lwc1(FSF, AT, base_offset);
   341   __ push_f();
   342   __ b(Done);
   343   __ delayed()->nop();
   345   __ bind(notFloat);
   346 #ifdef ASSERT
   347   { 
   348     Label L;
   349     __ daddiu(AT, T1, -JVM_CONSTANT_Integer);
   350     __ beq(AT, R0, L);
   351     __ delayed()->nop();
   352     __ stop("unexpected tag type in ldc");
   353     __ bind(L);
   354   }
   355 #endif
   356   // atos and itos
   357   __ dadd(T0, T3, T2);
   358   __ lw(FSR, T0, base_offset);
   359   __ push(itos);
   360   __ b(Done);
   361   __ delayed()->nop(); 
   364   if (VerifyOops) {
   365     __ verify_oop(FSR);
   366   }
   368   __ bind(Done);
   369 }
   371 // Fast path for caching oop constants.
   372 void TemplateTable::fast_aldc(bool wide) {
   373   transition(vtos, atos);
   375   Register result = FSR;
   376   Register tmp = SSR;
   377   int index_size = wide ? sizeof(u2) : sizeof(u1);
   379   Label resolved;
   380  // We are resolved if the resolved reference cache entry contains a
   381  // non-null object (String, MethodType, etc.)
   382   assert_different_registers(result, tmp);
   383   __ get_cache_index_at_bcp(tmp, 1, index_size);
   384   __ load_resolved_reference_at_index(result, tmp);
   385   __ bne(result, R0, resolved);
   386   __ delayed()->nop();
   388   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
   389   // first time invocation - must resolve first
   390   int i = (int)bytecode();
   391   __ move(tmp, i);
   392   __ call_VM(result, entry, tmp);
   394   __ bind(resolved);
   396   if (VerifyOops) {
   397     __ verify_oop(result);
   398   }
   399 }
   402 // used register: T2, T3, T1
   403 // T2 : index
   404 // T3 : cpool
   405 // T1 : tag
   406 void TemplateTable::ldc2_w() {
   407   transition(vtos, vtos);
   408   Label Long, Done;
   410   // get index in cpool
   411   __ load_two_bytes_from_at_bcp(T2, AT, 1);
   412   __ huswap(T2);
   414   __ get_cpool_and_tags(T3, T1);
   416   const int base_offset = ConstantPool::header_size() * wordSize;
   417   const int tags_offset = Array<u1>::base_offset_in_bytes();
   419   // get type in T1
   420   __ dadd(AT, T1, T2);
   421   __ lb(T1, AT, tags_offset);
   423   __ daddiu(AT, T1, - JVM_CONSTANT_Double);
   424   __ bne(AT, R0, Long);
   425   __ delayed()->dsll(T2, T2, Address::times_8);
   426   // dtos	
   427   __ daddu(AT, T3, T2);
   428   __ ldc1(FSF, AT, base_offset + 0 * wordSize);
   429   __ sdc1(FSF, SP, - 2 * wordSize);
   430   __ b(Done);
   431   __ delayed()->daddi(SP, SP, - 2 * wordSize);
   433   // ltos
   434   __ bind(Long);
   435   __ dadd(AT, T3, T2);	
   436   __ ld(FSR, AT, base_offset + 0 * wordSize);
   437   __ push(ltos);
   439   __ bind(Done);
   440 }
   442 // we compute the actual local variable address here
    443 // x86 doesn't do this because it has scaled-index addressing; we don't, so we compute the address here
   444 void TemplateTable::locals_index(Register reg, int offset) {
   445   __ lbu(reg, at_bcp(offset));
   446   __ dsll(reg, reg, Address::times_8);
   447   __ dsub(reg, LVP, reg);
   448 }
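In effect locals_index converts the one-byte local index taken from the bytecode stream into an absolute address: each local occupies one 8-byte slot and the slots grow downward from LVP. The same arithmetic as a C++ sketch (assuming that downward-growing layout):

    #include <cstdint>

    // reg = LVP - (index << 3): slot n sits 8*n bytes below the locals pointer.
    static uint8_t* local_slot(uint8_t* lvp, unsigned index) {
      return lvp - index * 8;
    }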
    450 // this method does bytecode folding for the two forms:
   451 // iload iload			iload caload
   452 // used register : T2, T3
   453 // T2 : bytecode
   454 // T3 : folded code
   455 void TemplateTable::iload() {
   456   transition(vtos, itos);
   457   if (RewriteFrequentPairs) { 
   458     Label rewrite, done;
   459     // get the next bytecode in T2
   460     __ lbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
   461     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
   462     // last two iloads in a pair.  Comparing against fast_iload means that
    463 // the next bytecode is neither an iload nor a caload, and therefore
   464     // an iload pair.
   465     __ move(AT, Bytecodes::_iload);
   466     __ beq(AT, T2, done);
   467     __ delayed()->nop();
   469     __ move(T3, Bytecodes::_fast_iload2);
   470     __ move(AT, Bytecodes::_fast_iload);
   471     __ beq(AT, T2, rewrite);
   472     __ delayed()->nop();
   474     // if _caload, rewrite to fast_icaload
   475     __ move(T3, Bytecodes::_fast_icaload);
   476     __ move(AT, Bytecodes::_caload);
   477     __ beq(AT, T2, rewrite);
   478     __ delayed()->nop();
   480     // rewrite so iload doesn't check again.
   481     __ move(T3, Bytecodes::_fast_iload);
   483     // rewrite
   484     // T3 : fast bytecode
   485     __ bind(rewrite);
   486     patch_bytecode(Bytecodes::_iload, T3, T2, false);
   487     __ bind(done);
   488   }
   490   // Get the local value into tos
   491   locals_index(T2);
   492   __ lw(FSR, T2, 0);
   493 }
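The rewrite decision above can be stated independently of the assembler. A sketch with made-up enum values (the real constants live in Bytecodes):

    enum Bc { BC_ILOAD, BC_FAST_ILOAD, BC_FAST_ILOAD2, BC_FAST_ICALOAD, BC_CALOAD, BC_OTHER };

    // What _iload gets rewritten to, given the bytecode that follows it.
    static Bc choose_iload_rewrite(Bc next) {
      if (next == BC_ILOAD)      return BC_ILOAD;         // wait: only the last iload of a run is rewritten
      if (next == BC_FAST_ILOAD) return BC_FAST_ILOAD2;   // iload/iload pair
      if (next == BC_CALOAD)     return BC_FAST_ICALOAD;  // iload/caload pair
      return BC_FAST_ILOAD;                               // lone iload: skip this check next time
    }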
   495 // used register T2
   496 // T2 : index
   497 void TemplateTable::fast_iload2() {
   498 	transition(vtos, itos);
   499 	locals_index(T2);
   500 	__ lw(FSR, T2, 0);
   501 	__ push(itos);
   502 	locals_index(T2, 3);
   503 	__ lw(FSR, T2, 0);
   504 }
   506 // used register T2
   507 // T2 : index
   508 void TemplateTable::fast_iload() {
   509   transition(vtos, itos);
   510   locals_index(T2);
   511   __ lw(FSR, T2, 0);
   512 }
   514 // used register T2
   515 // T2 : index
   516 void TemplateTable::lload() {
   518   transition(vtos, ltos);
   519   locals_index(T2);
   520   __ ld(FSR, T2, -wordSize);
   521   __ ld(SSR, T2, 0);
   522 }
   524 // used register T2
   525 // T2 : index
   526 void TemplateTable::fload() {
   527   transition(vtos, ftos);
   528   locals_index(T2);
    529 //FIXME, aoqi. What should the high 32 bits be when storing a single float into a 64-bit register?
   530   //__ mtc1(R0, FSF);
   531   __ lwc1(FSF, T2, 0);
   532 }
   534 // used register T2
   535 // T2 : index
   536 void TemplateTable::dload() {
   538   transition(vtos, dtos);
   539   locals_index(T2);
   540 /*  if (TaggedStackInterpreter) {
   541     // Get double out of locals array, onto temp stack and load with
   542     // float instruction into ST0
   543     __ dsll(AT,T2,Interpreter::stackElementScale());
   544     __ dadd(AT, LVP, AT);
   545     __ ldc1(FSF, AT, Interpreter::local_offset_in_bytes(1)); 
   546   } else {*/
   547     __ ldc1(FSF, T2, -wordSize);
   548     __ ldc1(SSF, T2, 0);
   549  // }
   550 }
   552 // used register T2
   553 // T2 : index
   554 void TemplateTable::aload() 
   555 {
   556   transition(vtos, atos);
   557   locals_index(T2);
   558   __ ld(FSR, T2, 0);
   559 }
   561 void TemplateTable::locals_index_wide(Register reg) {
   562   __ load_two_bytes_from_at_bcp(reg, AT, 2);
   563   __ huswap(reg);
   564   __ dsll(reg, reg, Address::times_8);
   565   __ dsub(reg, LVP, reg);
   566 }
   568 // used register T2
   569 // T2 : index
   570 void TemplateTable::wide_iload() {
   571 	transition(vtos, itos);
   572 	locals_index_wide(T2);
   573 	__ ld(FSR, T2, 0);
   574 }
   576 // used register T2
   577 // T2 : index
   578 void TemplateTable::wide_lload() {
   579 	transition(vtos, ltos);
   580 	locals_index_wide(T2);
   581 	__ ld(FSR, T2, -4);
   582 }
   584 // used register T2
   585 // T2 : index
   586 void TemplateTable::wide_fload() {
   587 	transition(vtos, ftos);
   588 	locals_index_wide(T2);
   589 	__ lwc1(FSF, T2, 0);
   590 }
   592 // used register T2
   593 // T2 : index
   594 void TemplateTable::wide_dload() {
   595 	transition(vtos, dtos);
   596 	locals_index_wide(T2);
   597 /*	if (TaggedStackInterpreter) {
   598 		// Get double out of locals array, onto temp stack and load with
   599 		// float instruction into ST0
   600 		//   __ movl(eax, laddress(ebx));
   601 		//  __ movl(edx, haddress(ebx));
   602 		__ dsll(AT,T2,Interpreter::stackElementScale());
   603 		__ dadd(AT, LVP, AT);
   604 		__ ldc1(FSF, AT, Interpreter::local_offset_in_bytes(1)); 
   606 		//  __ pushl(edx);  // push hi first
   607 		//  __ pushl(eax);
   608 		//  __ fld_d(Address(esp));
   609 		//  __ addl(esp, 2*wordSize);
   610 	} else {*/
   611 		__ ldc1(FSF, T2, -4);
   612 	//}
   613 }
   615 // used register T2
   616 // T2 : index
   617 void TemplateTable::wide_aload() {
   618 	transition(vtos, atos);
   619 	locals_index_wide(T2);
   620 	__ ld(FSR, T2, 0);
   621 }
    623 // we use A2 as the register for the index, BE CAREFUL!
    624 // we don't use the tgeu trap (code 29) for now; kept for later optimization
   625 void TemplateTable::index_check(Register array, Register index) {
   626   // Pop ptr into array
   627   __ pop_ptr(array);
   628   index_check_without_pop(array, index);
   629 }
   631 void TemplateTable::index_check_without_pop(Register array, Register index) {
   632   // destroys ebx
   633   // check array
   634   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
   636   // check index
   637   Label ok;
   638   __ lw(AT, array, arrayOopDesc::length_offset_in_bytes());
   639 #ifndef OPT_RANGECHECK
   640   __ sltu(AT, index, AT);
   641   __ bne(AT, R0, ok);
   642   __ delayed()->nop(); 
    644   //throw_ArrayIndexOutOfBoundsException assumes the aberrant index is in A2
   645   if (A2 != index) __ move(A2, index);		
   646   __ jmp(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   647   __ delayed()->nop();
   648   __ bind(ok);
   649 #else
   650   __ lw(AT, array, arrayOopDesc::length_offset_in_bytes());
   651   __ move(A2, index);
   652   __ tgeu(A2, AT, 29);
   653 #endif
   654 }
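The sltu/bne sequence is an unsigned comparison, so a negative index fails the bounds check as well. The equivalent semantics in plain C++ (a sketch, not the generated code):

    #include <cstdint>
    #include <stdexcept>

    static void index_check_sketch(uint32_t length, int32_t index) {
      // One unsigned compare covers both index < 0 and index >= length.
      if (static_cast<uint32_t>(index) >= length)
        throw std::out_of_range("ArrayIndexOutOfBoundsException");
    }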
   656 void TemplateTable::iaload() {
   657   transition(itos, itos);
   658   //  __ pop(SSR);
   659   index_check(SSR, FSR);
   660   __ dsll(FSR, FSR, 2);
   661   __ dadd(FSR, SSR, FSR);
   662   //FSR: index
   663   __ lw(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_INT));
   664 }
   667 void TemplateTable::laload() {
   668   transition(itos, ltos);
   669   //  __ pop(SSR);
   670   index_check(SSR, FSR);
   671   __ dsll(AT, FSR, Address::times_8);
   672   __ dadd(AT, SSR, AT);
   673   __ ld(FSR, AT, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize);
   674 }
   676 void TemplateTable::faload() {
   677 	transition(itos, ftos);
   678 	// __ pop(SSR);
   679 	index_check(SSR, FSR);  
   680 	__ shl(FSR, 2);
   681 	__ dadd(FSR, SSR, FSR);
   682 	__ lwc1(FSF, FSR, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
   683 }
   685 void TemplateTable::daload() {
   686 	transition(itos, dtos);
   687 	//__ pop(SSR);
   688 	index_check(SSR, FSR);  
   689 	__ dsll(AT, FSR, 3);
   690 	__ dadd(AT, SSR, AT);
   691 	__ ldc1(FSF, AT, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) + 0 * wordSize);
   692 }
   694 void TemplateTable::aaload() {
   695   transition(itos, atos);
   696   //__ pop(SSR);
   697   index_check(SSR, FSR);
   698   __ dsll(FSR, FSR, UseCompressedOops ? Address::times_4 : Address::times_8);
   699   __ dadd(FSR, SSR, FSR);
   700   //add for compressedoops
   701   __ load_heap_oop(FSR, Address(FSR, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   702 }
   704 void TemplateTable::baload() {
   705   transition(itos, itos);
   706   //__ pop(SSR);
   707   index_check(SSR, FSR); 
   708   __ dadd(FSR, SSR, FSR);
   709   __ lb(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_BYTE));
   710 }
   712 void TemplateTable::caload() {
   713   transition(itos, itos);
   714   // __ pop(SSR);
   715   index_check(SSR, FSR);
   716   __ dsll(FSR, FSR, Address::times_2);
   717   __ dadd(FSR, SSR, FSR);
   718   __ lhu(FSR, FSR,  arrayOopDesc::base_offset_in_bytes(T_CHAR));
   719 }
   721 // iload followed by caload frequent pair
   722 // used register : T2
   723 // T2 : index
   724 void TemplateTable::fast_icaload() {
   725   transition(vtos, itos);
   726   // load index out of locals
   727   locals_index(T2);
   728   __ lw(FSR, T2, 0);
   729   //	__ pop(SSR);
   730   index_check(SSR, FSR);
   731   __ dsll(FSR, FSR, 1);
   732   __ dadd(FSR, SSR, FSR);
   733   __ lhu(FSR, FSR,  arrayOopDesc::base_offset_in_bytes(T_CHAR));
   734 }
   736 void TemplateTable::saload() {
   737   transition(itos, itos);
   738   // __ pop(SSR);
   739   index_check(SSR, FSR);  
   740   __ dsll(FSR, FSR, Address::times_2);
   741   __ dadd(FSR, SSR, FSR);
   742   __ lh(FSR, FSR,  arrayOopDesc::base_offset_in_bytes(T_SHORT));
   743 }
   745 void TemplateTable::iload(int n) {
   746 	transition(vtos, itos);
   747 	__ lw(FSR, iaddress(n));
   748 }
   750 void TemplateTable::lload(int n) {
   751 	transition(vtos, ltos);
   752 	__ ld(FSR, laddress(n));
   753 }
   755 void TemplateTable::fload(int n) {
   756   transition(vtos, ftos);
   757   //__ mtc1(R0, FSF);
   758   __ lwc1(FSF, faddress(n));
   759 }
   760 //FIXME here
   761 void TemplateTable::dload(int n) {
   762 	transition(vtos, dtos);
   763 	__ ldc1(FSF, laddress(n));
   764 }
   766 void TemplateTable::aload(int n) {
   767   transition(vtos, atos);
   768   __ ld(FSR, aaddress(n));
   769 }
   771 // used register : T2, T3
   772 // T2 : bytecode
   773 // T3 : folded code
   774 void TemplateTable::aload_0() {
   775 	transition(vtos, atos);
   776 	// According to bytecode histograms, the pairs:
   777 	//
   778 	// _aload_0, _fast_igetfield
   779 	// _aload_0, _fast_agetfield
   780 	// _aload_0, _fast_fgetfield
   781 	//
   782 	// occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
   783 	// bytecode checks if the next bytecode is either _fast_igetfield, 
   784 	// _fast_agetfield or _fast_fgetfield and then rewrites the
   785 	// current bytecode into a pair bytecode; otherwise it rewrites the current
   786 	// bytecode into _fast_aload_0 that doesn't do the pair check anymore.
   787 	//
   788 	// Note: If the next bytecode is _getfield, the rewrite must be delayed,
   789 	//       otherwise we may miss an opportunity for a pair.
   790 	//
   791 	// Also rewrite frequent pairs
   792 	//   aload_0, aload_1
   793 	//   aload_0, iload_1
   794 	// These bytecodes with a small amount of code are most profitable to rewrite
   795 	if (RewriteFrequentPairs) {
   796 		Label rewrite, done;
   797 		// get the next bytecode in T2
   798 		__ lbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
   800 		// do actual aload_0
   801 		aload(0);
   803 		// if _getfield then wait with rewrite
   804 		__ move(AT, Bytecodes::_getfield);
   805 		__ beq(AT, T2, done);
   806 		__ delayed()->nop();
    808 		// if _igetfield then rewrite to _fast_iaccess_0
   809 		assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == 
   810 				Bytecodes::_aload_0, "fix bytecode definition");
   811 		__ move(T3, Bytecodes::_fast_iaccess_0);
   812 		__ move(AT, Bytecodes::_fast_igetfield);
   813 		__ beq(AT, T2, rewrite);
   814 		__ delayed()->nop();
    816 		// if _agetfield then rewrite to _fast_aaccess_0
   817 		assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == 
   818 				Bytecodes::_aload_0, "fix bytecode definition");
   819 		__ move(T3, Bytecodes::_fast_aaccess_0);
   820 		__ move(AT, Bytecodes::_fast_agetfield);
   821 		__ beq(AT, T2, rewrite);
   822 		__ delayed()->nop();
    824 		// if _fgetfield then rewrite to _fast_faccess_0
   825 		assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == 
   826 				Bytecodes::_aload_0, "fix bytecode definition");
   827 		__ move(T3, Bytecodes::_fast_faccess_0);
   828 		__ move(AT, Bytecodes::_fast_fgetfield);
   829 		__ beq(AT, T2, rewrite);
   830 		__ delayed()->nop();
   832 		// else rewrite to _fast_aload0
   833 		assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == 
   834 				Bytecodes::_aload_0, "fix bytecode definition");
   835 		__ move(T3, Bytecodes::_fast_aload_0);
   837 		// rewrite
   838 		__ bind(rewrite);
   839 		patch_bytecode(Bytecodes::_aload_0, T3, T2, false);
   841 		__ bind(done);
   842 	} else {
   843 		aload(0);
   844 	}
   845 }
   847 void TemplateTable::istore() {
   848 	transition(itos, vtos);
   849 	locals_index(T2);
   850 	__ sw(FSR, T2, 0);
   851 }
   853 void TemplateTable::lstore() {
   854   transition(ltos, vtos);
   855   locals_index(T2);
   856   __ sd(FSR, T2, -wordSize);
   857 }
   859 void TemplateTable::fstore() {
   860 	transition(ftos, vtos);
   861 	locals_index(T2);
   862 	__ swc1(FSF, T2, 0);
   863 }
   865 void TemplateTable::dstore() {
   866   transition(dtos, vtos);
   867   locals_index(T2);
   868   __ sdc1(FSF, T2, -wordSize);
   869 }
   871 void TemplateTable::astore() {
   872   transition(vtos, vtos);
   873   //  __ pop(FSR);
   874   __ pop_ptr(FSR);
   875   locals_index(T2);
   876   __ sd(FSR, T2, 0);
   877 }
   879 void TemplateTable::wide_istore() {
   880 	transition(vtos, vtos);
   881 	//  __ pop(FSR);
   882 	__ pop_i(FSR);
   883 	locals_index_wide(T2);
   884 	__ sd(FSR, T2, 0);
   885 }
   887 void TemplateTable::wide_lstore() {
   888 	transition(vtos, vtos);
   889 	//__ pop2(FSR, SSR);
   890 	//__ pop_l(FSR, SSR); 
   891 	__ pop_l(FSR); //aoqi:FIXME Is this right?
   892 	locals_index_wide(T2);
   893 	__ sd(FSR, T2, -4);
   894 }
   896 void TemplateTable::wide_fstore() {
   897 	wide_istore();
   898 }
   900 void TemplateTable::wide_dstore() {
   901 	wide_lstore();
   902 }
   904 void TemplateTable::wide_astore() {
   905 	transition(vtos, vtos);
   906 	__ pop_ptr(FSR);
   907 	locals_index_wide(T2);
   908 	__ sd(FSR, T2, 0);
   909 }
   911 // used register : T2
   912 void TemplateTable::iastore() {
   913   transition(itos, vtos);
   914   __ pop_i(SSR);
   915   index_check(T2, SSR);  // prefer index in ebx
   916   __ dsll(SSR, SSR, Address::times_4);
   917   __ dadd(T2, T2, SSR);
   918   __ sw(FSR, T2, arrayOopDesc::base_offset_in_bytes(T_INT));
   919 }
   923 // used register T2, T3
   924 void TemplateTable::lastore() {
   925   transition(ltos, vtos);
   926   __ pop_i (T2);
   927   index_check(T3, T2);
   928   __ dsll(T2, T2, Address::times_8);
   929   __ dadd(T3, T3, T2);
   930   __ sd(FSR, T3, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize);
   931 }
   933 // used register T2
   934 void TemplateTable::fastore() {
   935   transition(ftos, vtos);
   936   __ pop_i(SSR);	
   937   index_check(T2, SSR); 
   938   __ dsll(SSR, SSR, Address::times_4);
   939   __ dadd(T2, T2, SSR);
   940   __ swc1(FSF, T2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
   941 }
   943 // used register T2, T3
   944 void TemplateTable::dastore() {
   945   transition(dtos, vtos);
   946   __ pop_i (T2); 
   947   index_check(T3, T2);  
   948   __ dsll(T2, T2, Address::times_8);
   949   __ daddu(T3, T3, T2);
   950   __ sdc1(FSF, T3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) + 0 * wordSize);
   952 }
   954 // used register : T2, T3, T8
   955 // T2 : array
   956 // T3 : subklass
   957 // T8 : supklass
   958 void TemplateTable::aastore() {
   959   Label is_null, ok_is_subtype, done;
   960   transition(vtos, vtos);
   961   // stack: ..., array, index, value
   962   __ ld(FSR, at_tos());     // Value
   963   __ lw(SSR, at_tos_p1());  // Index
   964   __ ld(T2, at_tos_p2());  // Array
   966   // index_check(T2, SSR);
   967   index_check_without_pop(T2, SSR);
   968   // do array store check - check for NULL value first
   969   __ beq(FSR, R0, is_null);
   970   __ delayed()->nop();
   972   // Move subklass into T3
   973   //__ ld(T3,  Address(FSR, oopDesc::klass_offset_in_bytes()));
   974   //add for compressedoops
   975   __ load_klass(T3, FSR);
   976   // Move superklass into T8
   977   //__ ld(T8, Address(T2, oopDesc::klass_offset_in_bytes()));
   978   //add for compressedoops
   979   __ load_klass(T8, T2);
   980   __ ld(T8, Address(T8,  ObjArrayKlass::element_klass_offset()));
    981   // Collapse array + index*scale + header offset into a single register (T2)
   982   __ dsll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8);
   983   __ dadd(T2, T2, AT);
   984   __ daddi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
   986   // Generate subtype check.
   987   // Superklass in T8.  Subklass in T3.
   988   __ gen_subtype_check(T8, T3, ok_is_subtype);				// <-- Jin
   989   // Come here on failure
   990   // object is at FSR
   991   __ jmp(Interpreter::_throw_ArrayStoreException_entry);    // <-- Jin
   992   __ delayed()->nop();
   993   // Come here on success
   994   __ bind(ok_is_subtype);
   995   //replace with do_oop_store->store_heap_oop
   996   //__ sd(FSR, T2, 0);
   997   __ store_heap_oop(Address(T2, 0), FSR);					// <-- Jin
   998   __ store_check(T2);
   999   __ b(done);
  1000   __ delayed()->nop();
  1002   // Have a NULL in FSR, EDX=T2, SSR=index.  Store NULL at ary[idx]
  1003   __ bind(is_null);
  1004   __ profile_null_seen(T9);
  1005   __ dsll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8);
  1006   __ dadd(T2, T2, AT);
  1007   //__ sd(FSR, T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  1008   __ store_heap_oop(Address(T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), FSR);	/* FSR is null here */
  1010   __ bind(done);
   1011   __ daddi(SP, SP, 3 * Interpreter::stackElementSize);
   1012 }
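The subtype check generated above enforces the aastore rule: a non-null value must be assignable to the array's element type or ArrayStoreException is thrown, while null stores always succeed. A simplified sketch of that rule (linear superclass walk only; the real gen_subtype_check also handles interfaces and uses caches):

    struct KlassSketch { const KlassSketch* super; };

    static bool is_subtype_of(const KlassSketch* sub, const KlassSketch* sup) {
      for (const KlassSketch* k = sub; k != nullptr; k = k->super)
        if (k == sup) return true;
      return false;
    }

    // null is stored unconditionally; anything else must pass the element-type check.
    static bool aastore_allowed(const KlassSketch* value_klass, const KlassSketch* element_klass) {
      return value_klass == nullptr || is_subtype_of(value_klass, element_klass);
    }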
  1014 void TemplateTable::bastore() {
  1015   transition(itos, vtos);
  1016   __ pop_i (SSR); 
  1017   index_check(T2, SSR);
  1018   __ dadd(SSR, T2, SSR);
   1019   __ sb(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_BYTE));
   1020 }
  1022 void TemplateTable::castore() {
  1023   transition(itos, vtos);
  1024   __ pop_i(SSR); 
  1025   index_check(T2, SSR); 
  1026   __ dsll(SSR, SSR, Address::times_2);
  1027   __ dadd(SSR, T2, SSR);
   1028   __ sh(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_CHAR));
   1029 }
  1031 void TemplateTable::sastore() {
   1032   castore();
   1033 }
  1035 void TemplateTable::istore(int n) {
  1036   transition(itos, vtos);
   1037   __ sw(FSR, iaddress(n));
   1038 }
  1040 void TemplateTable::lstore(int n) {
  1041   transition(ltos, vtos);
   1042   __ sd(FSR, laddress(n));
   1043 }
  1045 void TemplateTable::fstore(int n) {
  1046   transition(ftos, vtos);
   1047   __ swc1(FSF, faddress(n));
   1048 }
  1050 void TemplateTable::dstore(int n) {
  1051   transition(dtos, vtos);
   1052   __ sdc1(FSF, laddress(n));
   1053 }
  1055 void TemplateTable::astore(int n) {
  1056   transition(vtos, vtos);
  1057   __ pop_ptr(FSR);
   1058   __ sd(FSR, aaddress(n));
   1059 }
  1061 void TemplateTable::pop() {
  1062   transition(vtos, vtos);
   1063   __ daddi(SP, SP, Interpreter::stackElementSize);
   1064 }
  1066 void TemplateTable::pop2() {
  1067   transition(vtos, vtos);
   1068   __ daddi(SP, SP, 2 * Interpreter::stackElementSize);
   1069 }
  1071 void TemplateTable::dup() {
  1072   transition(vtos, vtos);
  1073   // stack: ..., a
  1074   __ load_ptr(0, FSR);
  1075   __ push_ptr(FSR);
   1076   // stack: ..., a, a
   1077 }
  1079 // blows FSR
  1080 void TemplateTable::dup_x1() {
  1081 	transition(vtos, vtos);
  1082 	// stack: ..., a, b
  1083 	__ load_ptr(0, FSR);  // load b
  1084 	__ load_ptr(1, A5);  // load a
  1085 	__ store_ptr(1, FSR); // store b
  1086 	__ store_ptr(0, A5); // store a
  1087 	__ push_ptr(FSR);             // push b
   1088 	// stack: ..., b, a, b
   1089 }
  1091 // blows FSR
  1092 void TemplateTable::dup_x2() {
  1093 	transition(vtos, vtos);
  1094 	// stack: ..., a, b, c
  1095 	__ load_ptr(0, FSR);  // load c
  1096 	__ load_ptr(2, A5);  // load a
  1097 	__ store_ptr(2, FSR); // store c in a
  1098 	__ push_ptr(FSR);             // push c
  1099 	// stack: ..., c, b, c, c
  1100 	__ load_ptr(2, FSR);  // load b
  1101 	__ store_ptr(2, A5); // store a in b
  1102 	// stack: ..., c, a, c, c
  1103 	__ store_ptr(1, FSR); // store b in c
   1104 	// stack: ..., c, a, b, c
   1105 }
  1107 // blows FSR
  1108 void TemplateTable::dup2() {
  1109 	transition(vtos, vtos);
  1110 	// stack: ..., a, b
  1111 	__ load_ptr(1, FSR);  // load a
  1112 	__ push_ptr(FSR);             // push a
  1113 	__ load_ptr(1, FSR);  // load b
  1114 	__ push_ptr(FSR);             // push b
   1115 	// stack: ..., a, b, a, b
   1116 }
  1118 // blows FSR
  1119 void TemplateTable::dup2_x1() {
  1120 	transition(vtos, vtos);
  1121 	// stack: ..., a, b, c
  1122 	__ load_ptr(0, T2);  // load c
  1123 	__ load_ptr(1, FSR);  // load b
  1124 	__ push_ptr(FSR);             // push b
  1125 	__ push_ptr(T2);             // push c
  1126 	// stack: ..., a, b, c, b, c
  1127 	__ store_ptr(3, T2); // store c in b
  1128 	// stack: ..., a, c, c, b, c
  1129 	__ load_ptr(4, T2);  // load a
  1130 	__ store_ptr(2, T2); // store a in 2nd c
  1131 	// stack: ..., a, c, a, b, c
  1132 	__ store_ptr(4, FSR); // store b in a
  1133 	// stack: ..., b, c, a, b, c
   1135 	// stack: ..., b, c, a, b, c
   1136 }
  1138 // blows FSR, SSR
  1139 void TemplateTable::dup2_x2() {
  1140 	transition(vtos, vtos);
  1141 	// stack: ..., a, b, c, d
  1142 	// stack: ..., a, b, c, d
  1143 	__ load_ptr(0, T2);  // load d
  1144 	__ load_ptr(1, FSR);  // load c
  1145 	__ push_ptr(FSR);             // push c
  1146 	__ push_ptr(T2);             // push d
  1147 	// stack: ..., a, b, c, d, c, d
  1148 	__ load_ptr(4, FSR);  // load b
  1149 	__ store_ptr(2, FSR); // store b in d
  1150 	__ store_ptr(4, T2); // store d in b
  1151 	// stack: ..., a, d, c, b, c, d
  1152 	__ load_ptr(5, T2);  // load a
  1153 	__ load_ptr(3, FSR);  // load c
  1154 	__ store_ptr(3, T2); // store a in c
  1155 	__ store_ptr(5, FSR); // store c in a
  1156 	// stack: ..., c, d, a, b, c, d
   1158 	// stack: ..., c, d, a, b, c, d
   1159 }
  1161 // blows FSR
  1162 void TemplateTable::swap() {
  1163 	transition(vtos, vtos);
  1164 	// stack: ..., a, b
  1166 	__ load_ptr(1, A5);  // load a
  1167 	__ load_ptr(0, FSR);  // load b
  1168 	__ store_ptr(0, A5); // store a in b
  1169 	__ store_ptr(1, FSR); // store b in a
   1171 	// stack: ..., b, a
   1172 }
  1174 void TemplateTable::iop2(Operation op) {
  1175 	transition(itos, itos);
  1176 	switch (op) {
  1177 		case add  :                    
  1178 			__ pop_i(SSR); 
  1179 			__ addu32(FSR, SSR, FSR); 
  1180 			break;
  1181 		case sub  :  
  1182 			__ pop_i(SSR); 
  1183 			__ subu32(FSR, SSR, FSR); 
  1184 			break;
  1185 		case mul  :                    
  1186 			__ lw(SSR, SP, 0);
  1187 			__ mult(SSR, FSR);
  1188 			__ daddi(SP, SP, wordSize);
  1189 			__ nop();
  1190 			__ mflo(FSR);
  1191 			break;
  1192 		case _and :                    
  1193 			__ pop_i(SSR); 
  1194 			__ andr(FSR, SSR, FSR); 
  1195 			break;
  1196 		case _or  :                    
  1197 			__ pop_i(SSR); 
  1198 			__ orr(FSR, SSR, FSR); 
  1199 			break;
  1200 		case _xor :                    
  1201 			__ pop_i(SSR); 
  1202 			__ xorr(FSR, SSR, FSR); 
  1203 			break;
  1204 		case shl  : 
  1205 			__ pop_i(SSR); 
  1206 			__ sllv(FSR, SSR, FSR);      
   1207 			break; // MIPS shift instructions, like Intel's, implicitly mask the count to the low 5 bits
  1208 		case shr  : 
  1209 			__ pop_i(SSR); 
  1210 			__ srav(FSR, SSR, FSR);      
   1211 			break; // MIPS shift instructions, like Intel's, implicitly mask the count to the low 5 bits
  1212 		case ushr : 
  1213 			__ pop_i(SSR); 
  1214 			__ srlv(FSR, SSR, FSR);     
   1215 			break; // MIPS shift instructions, like Intel's, implicitly mask the count to the low 5 bits
   1216 		default   : ShouldNotReachHere();
   1217 	}
   1218 }
  1220 // the result stored in FSR, SSR,
  1221 // used registers : T2, T3
  1222 //FIXME, aoqi
  1223 void TemplateTable::lop2(Operation op) {
  1224   transition(ltos, ltos);
  1225   //__ pop2(T2, T3);
  1226   __ pop_l(T2, T3);
   1227 #ifdef ASSERT
   1228   {
  1229     Label  L;
  1230     __ beq(T3, R0, L);
  1231     __ delayed()->nop();
  1232     // FIXME: stack verification required
  1233 //    __ stop("lop2, wrong stack");  // <--- Fu 20130930
   1234     __ bind(L);
   1235   }
  1236 #endif
  1237   switch (op) {
  1238     case add : 
  1239       __ daddu(FSR, T2, FSR);
  1240       //__ sltu(AT, FSR, T2);
  1241       //__ daddu(SSR, T3, SSR);
  1242       //__ daddu(SSR, SSR, AT); 
  1243       break;
  1244     case sub :
  1245       __ dsubu(FSR, T2, FSR);
  1246       //__ sltu(AT, T2, FSR);
  1247       //__ dsubu(SSR, T3, SSR);
  1248       //__ dsubu(SSR, SSR, AT);
  1249       break;
  1250     case _and: 
  1251       __ andr(FSR, T2, FSR); 
  1252       //__ andr(SSR, T3, SSR); 
  1253       break;
  1254     case _or : 
  1255       __ orr(FSR, T2, FSR); 
  1256       //__ orr(SSR, T3, SSR); 
  1257       break;
  1258     case _xor: 
  1259       __ xorr(FSR, T2, FSR); 
  1260       //__ xorr(SSR, T3, SSR); 
  1261       break;
   1262     default : ShouldNotReachHere();
   1263   }
   1264 }
   1266 // Java requires that this bytecode handle 0x80000000 / -1 without raising an overflow exception;
   1267 // the result must be 0x80000000.
   1268 // The Godson-2 CPU behaves the same way, so unlike x86 we need no special handling here.
  1269 void TemplateTable::idiv() {
  1270 	transition(itos, itos);
  1271 	Label not_zero;
  1272 	//__ pop(SSR);
  1273 	__ pop_i(SSR);
  1274 	__ div(SSR, FSR);
  1276 	__ bne(FSR, R0, not_zero);
  1277 	__ delayed()->nop();
  1278 	//__ brk(7);
  1279 	__ jmp(Interpreter::_throw_ArithmeticException_entry); 
  1280 	__ delayed()->nop();
  1282 	__ bind(not_zero);
   1283 	__ mflo(FSR);
   1284 }
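The comment before idiv describes the Java semantics the template relies on: division by zero throws ArithmeticException, and Integer.MIN_VALUE / -1 wraps to Integer.MIN_VALUE rather than trapping. A sketch of those semantics (C++ only for illustration; the real work is the div/mflo above):

    #include <climits>
    #include <stdexcept>

    static int jvm_idiv_sketch(int x, int y) {
      if (y == 0) throw std::runtime_error("ArithmeticException: / by zero");
      if (x == INT_MIN && y == -1) return INT_MIN;  // spelled out: UB in C++, but defined in Java
      return x / y;
    }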
  1286 void TemplateTable::irem() {
  1287 	transition(itos, itos);
  1288 	Label not_zero;
  1289 	//__ pop(SSR);
  1290 	__ pop_i(SSR);
  1291 	__ div(SSR, FSR);
  1293 	__ bne(FSR, R0, not_zero);
  1294 	__ delayed()->nop();
  1295 	//__ brk(7);
  1296 	__ jmp(Interpreter::_throw_ArithmeticException_entry);
  1297 	__ delayed()->nop();
  1299 	__ bind(not_zero);
   1300 	__ mfhi(FSR);
   1301 }
  1303 // the multiplier in SSR||FSR, the multiplicand in stack
  1304 // the result in SSR||FSR
  1305 // used registers : T2, T3
  1306 void TemplateTable::lmul() {
  1307   transition(ltos, ltos);
  1308   Label done;
  1310   __ pop_l(T2, T3);
   1311 #ifdef ASSERT
   1312   {
  1313     Label  L;
  1314     __ orr(AT, T3, SSR);
  1315     __ beq(AT, R0, L);
  1316     __ delayed()->nop();
  1317     //FIXME, aoqi
  1318     //__ stop("lmul, wrong stack");
   1319     __ bind(L);
   1320   }
  1321 #endif
  1322   __ orr(AT, T2, FSR);
  1323   __ beq(AT, R0, done);
  1324   __ delayed()->nop();
  1326   __ dmultu(T2, FSR);
  1327   __ daddu(SSR, SSR, T3);
  1328   __ nop();
  1329   __ mflo(FSR);
  1330   __ mfhi(SSR);
  1331   __ b(done);
  1332   __ delayed()->nop();
   1334   __ bind(done);
   1335 }
  1337 // NOTE: i DONT use the Interpreter::_throw_ArithmeticException_entry
  1338 void TemplateTable::ldiv() {
  1339   transition(ltos, ltos);
  1340   Label normal;
  1342   __ bne(FSR, R0, normal);
  1343   __ delayed()->nop();
  1345   //__ brk(7);		//generate FPE
  1346   __ jmp(Interpreter::_throw_ArithmeticException_entry);
  1347   __ delayed()->nop();
  1349   __ bind(normal);
  1350   __ move(A1, FSR);
  1351   __ pop_l(A2, A3); 
   1352   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), A1, A2);
   1353 }
  1355 // NOTE: i DONT use the Interpreter::_throw_ArithmeticException_entry
  1356 void TemplateTable::lrem() {
  1357   transition(ltos, ltos);
  1358   Label normal;
  1360   __ bne(FSR, R0, normal);
  1361   __ delayed()->nop();
  1363   __ jmp(Interpreter::_throw_ArithmeticException_entry);
  1364   __ delayed()->nop();
  1366   __ bind(normal);
  1367   __ move(A1, FSR);
  1368   __ pop_l (A2, A3); 
   1369   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), A1, A2);
   1370 }
  1372 // result in FSR
  1373 // used registers : T0
  1374 void TemplateTable::lshl() {
  1375   transition(itos, ltos);
  1376   __ pop_l(T0, T1);	
   1377 #ifdef ASSERT
   1378   {
  1379     Label  L;
  1380     __ beq(T1, R0, L);
  1381     __ delayed()->nop();
  1382     //__ stop("lshl, wrong stack");  // <-- Fu 20130930 
   1383     __ bind(L);
   1384   }
  1385 #endif
  1386   __ andi(FSR, FSR, 0x3f);	      // the bit to be shifted
   1387   __ dsllv(FSR, T0, FSR);
   1388 }
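The andi with 0x3f reflects the JVM rule that long shifts use only the low six bits of the shift count. Sketch:

    #include <cstdint>

    static int64_t lshl_sketch(int64_t value, int32_t count) {
      // Only the low 6 bits of the count matter for 64-bit shifts.
      return static_cast<int64_t>(static_cast<uint64_t>(value) << (count & 0x3f));
    }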
  1390 // used registers : T0
  1391 void TemplateTable::lshr() {
  1392   transition(itos, ltos);
  1393   __ pop_l(T0, T1);	
   1394 #ifdef ASSERT
   1395   {
  1396     Label  L;
  1397     __ beq(T1, R0, L);
  1398     __ delayed()->nop();
  1399     __ stop("lshr, wrong stack");
   1400     __ bind(L);
   1401   }
  1402 #endif
  1403   __ andi(FSR, FSR, 0x3f);				// the bit to be shifted
   1404   __ dsrav(FSR, T0, FSR);
   1405 }
  1407 // used registers : T0
  1408 void TemplateTable::lushr() {
  1409   transition(itos, ltos);
  1410   __ pop_l(T0, T1);	
   1411 #ifdef ASSERT
   1412   {
  1413     Label  L;
  1414     __ beq(T1, R0, L);
  1415     __ delayed()->nop();
  1416     __ stop("lushr, wrong stack");
   1417     __ bind(L);
   1418   }
  1419 #endif
  1420   __ andi(FSR, FSR, 0x3f);				// the bit to be shifted
   1421   __ dsrlv(FSR, T0, FSR);
   1422 }
  1424 // result in FSF
  1425 void TemplateTable::fop2(Operation op) {
  1426 	transition(ftos, ftos);
  1427 	__ pop_ftos_to_esp();  // pop ftos into esp
  1428 	switch (op) {
  1429 		case add:
  1430 			__ lwc1(FTF, at_sp());
  1431 			__ add_s(FSF, FTF, FSF);
  1432 			break;
  1433 		case sub: 
  1434 			__ lwc1(FTF, at_sp());
  1435 			__ sub_s(FSF, FTF, FSF);
  1436 			break;
  1437 		case mul: 
  1438 			__ lwc1(FTF, at_sp());
  1439 			__ mul_s(FSF, FTF, FSF);
  1440 			break;
  1441 		case div: 
  1442 			__ lwc1(FTF, at_sp());
  1443 			__ div_s(FSF, FTF, FSF);
  1444 			break;
  1445 		case rem: 
  1446 			__ mfc1(FSR, FSF);
  1447 			__ mtc1(FSR, F12);
  1448 			__ lwc1(FTF, at_sp());
  1449 			__ rem_s(FSF, FTF, F12, FSF);
  1450 			break;
   1451 		default : ShouldNotReachHere();
   1452 	}
   1454 	__ daddi(SP, SP, 1 * wordSize);
   1455 }
  1457 // result in SSF||FSF
  1458 // i dont handle the strict flags
  1459 void TemplateTable::dop2(Operation op) {
  1460 	transition(dtos, dtos);
  1461 	__ pop_dtos_to_esp();  // pop dtos into esp
  1462 	switch (op) {
  1463 		case add: 
  1464 			__ ldc1(FTF, at_sp());
  1465 			__ add_d(FSF, FTF, FSF);
  1466 			break;
  1467 		case sub: 
  1468 			__ ldc1(FTF, at_sp());
  1469 			__ sub_d(FSF, FTF, FSF);
  1470 			break;
  1471 		case mul: 
  1472 			__ ldc1(FTF, at_sp());
  1473 			__ mul_d(FSF, FTF, FSF);
  1474 			break;
  1475 		case div:
  1476 			__ ldc1(FTF, at_sp());
  1477 			__ div_d(FSF, FTF, FSF);
  1478 			break;
  1479 		case rem:
  1480 			__ dmfc1(FSR, FSF);
  1481 			__ dmtc1(FSR, F12);
  1482 			__ ldc1(FTF, at_sp());
  1483 			__ rem_d(FSF, FTF, F12, FSF);
  1484 			break;
   1485 		default : ShouldNotReachHere();
   1486 	}
   1488 	__ daddi(SP, SP, 2 * wordSize);
   1489 }
  1491 void TemplateTable::ineg() {
  1492 	transition(itos, itos);
   1493 	__ neg(FSR);
   1494 }
  1496 void TemplateTable::lneg() {
  1497 	transition(ltos, ltos);
   1498 	__ dsubu(FSR, R0, FSR);
   1499 }
  1500 /*
  1501 // Note: 'double' and 'long long' have 32-bits alignment on x86.
  1502 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  1503   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  1504   // of 128-bits operands for SSE instructions.
  1505   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  1506   // Store the value to a 128-bits operand.
  1507   operand[0] = lo;
  1508   operand[1] = hi;
  1509   return operand;
  1512 // Buffer for 128-bits masks used by SSE instructions.
  1513 static jlong float_signflip_pool[2*2];
  1514 static jlong double_signflip_pool[2*2];
  1515 */
  1516 void TemplateTable::fneg() {
  1517 	transition(ftos, ftos);
   1518 	__ neg_s(FSF, FSF);
   1519 }
  1521 void TemplateTable::dneg() {
  1522 	transition(dtos, dtos);
   1523 	__ neg_d(FSF, FSF);
   1524 }
  1526 // used registers : T2
  1527 void TemplateTable::iinc() {
  1528 	transition(vtos, vtos);
  1529 	locals_index(T2);
  1530 	__ lw(FSR, T2, 0);
  1531 	__ lb(AT, at_bcp(2));           // get constant
  1532 	__ daddu(FSR, FSR, AT);
   1533 	__ sw(FSR, T2, 0);
   1534 }
  1536 // used register : T2
  1537 void TemplateTable::wide_iinc() {
  1538 	transition(vtos, vtos);
  1539 	locals_index_wide(T2);
  1540 	__ load_two_bytes_from_at_bcp(FSR, AT, 4);
  1541 	__ hswap(FSR);
  1542 	__ lw(AT, T2, 0);
  1543 	__ daddu(FSR, AT, FSR);
   1544 	__ sw(FSR, T2, 0);
   1545 }
  1547 void TemplateTable::convert() {
  1548   // Checking
  1549 #ifdef ASSERT
  1550   { TosState tos_in  = ilgl;
  1551     TosState tos_out = ilgl;
  1552     switch (bytecode()) {
  1553       case Bytecodes::_i2l: // fall through
  1554       case Bytecodes::_i2f: // fall through
  1555       case Bytecodes::_i2d: // fall through
  1556       case Bytecodes::_i2b: // fall through
  1557       case Bytecodes::_i2c: // fall through
  1558       case Bytecodes::_i2s: tos_in = itos; break;
  1559       case Bytecodes::_l2i: // fall through
  1560       case Bytecodes::_l2f: // fall through
  1561       case Bytecodes::_l2d: tos_in = ltos; break;
  1562       case Bytecodes::_f2i: // fall through
  1563       case Bytecodes::_f2l: // fall through
  1564       case Bytecodes::_f2d: tos_in = ftos; break;
  1565       case Bytecodes::_d2i: // fall through
  1566       case Bytecodes::_d2l: // fall through
  1567       case Bytecodes::_d2f: tos_in = dtos; break;
   1568       default             : ShouldNotReachHere();
   1569     }
  1570     switch (bytecode()) {
  1571       case Bytecodes::_l2i: // fall through
  1572       case Bytecodes::_f2i: // fall through
  1573       case Bytecodes::_d2i: // fall through
  1574       case Bytecodes::_i2b: // fall through
  1575       case Bytecodes::_i2c: // fall through
  1576       case Bytecodes::_i2s: tos_out = itos; break;
  1577       case Bytecodes::_i2l: // fall through
  1578       case Bytecodes::_f2l: // fall through
  1579       case Bytecodes::_d2l: tos_out = ltos; break;
  1580       case Bytecodes::_i2f: // fall through
  1581       case Bytecodes::_l2f: // fall through
  1582       case Bytecodes::_d2f: tos_out = ftos; break;
  1583       case Bytecodes::_i2d: // fall through
  1584       case Bytecodes::_l2d: // fall through
  1585       case Bytecodes::_f2d: tos_out = dtos; break;
   1586       default             : ShouldNotReachHere();
   1587     }
   1588     transition(tos_in, tos_out);
   1589   }
  1590 #endif // ASSERT
  1592   // Conversion
  1593   // (Note: use pushl(ecx)/popl(ecx) for 1/2-word stack-ptr manipulation)
  1594   switch (bytecode()) {
  1595     case Bytecodes::_i2l:
  1596       //__ extend_sign(SSR, FSR);
  1597       __ sll(FSR, FSR, 0);
  1598       break;
  1599     case Bytecodes::_i2f:
  1600       __ mtc1(FSR, FSF);
  1601       __ cvt_s_w(FSF, FSF);
  1602       break;
  1603     case Bytecodes::_i2d:
  1604       __ mtc1(FSR, FSF);
  1605       __ cvt_d_w(FSF, FSF);
  1606       break;
  1607     case Bytecodes::_i2b:
  1608       __ dsll32(FSR, FSR, 24);
  1609       __ dsra32(FSR, FSR, 24);
  1610       break;
  1611     case Bytecodes::_i2c:
   1612       __ andi(FSR, FSR, 0xFFFF);  // zero-extend to 16 bits (clears the upper 48 bits)
  1613       break;
  1614     case Bytecodes::_i2s:
  1615       __ dsll32(FSR, FSR, 16);
  1616       __ dsra32(FSR, FSR, 16);
  1617       break;
  1618     case Bytecodes::_l2i:
  1619       __ dsll32(FSR, FSR, 0);
  1620       __ dsra32(FSR, FSR, 0);
  1621       break;
  1622     case Bytecodes::_l2f:
  1623       __ dmtc1(FSR, FSF);
  1624       //__ mtc1(SSR, SSF);
  1625       __ cvt_s_l(FSF, FSF);
  1626       break;
  1627     case Bytecodes::_l2d:
  1628       __ dmtc1(FSR, FSF);
  1629       //__ mtc1(SSR, SSF);
  1630       __ cvt_d_l(FSF, FSF);
  1631       break;
   1632     case Bytecodes::_f2i:
   1633       {
  1634 	Label L;
  1635 	/*
  1636 	__ c_un_s(FSF, FSF);		//NaN?
  1637 	__ bc1t(L);
  1638 	__ delayed(); __ move(FSR, R0);
  1639 	*/
  1640 	__ trunc_w_s(F12, FSF);
  1641 	__ cfc1(AT, 31);
  1642 	__ li(T0, 0x10000);
  1643 	__ andr(AT, AT, T0);
  1644 	__ beq(AT, R0, L);
  1645 	__ delayed()->mfc1(FSR, F12);
  1647 	__ mov_s(F12, FSF);
  1648 	__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
   1649 	__ bind(L);
   1650       }
  1651       break;
   1652     case Bytecodes::_f2l:
   1653       {
  1654 	Label L;
  1655 	/*
  1656 	__ move(SSR, R0);
  1657 	__ c_un_s(FSF, FSF);		//NaN?
  1658 	__ bc1t(L);
  1659 	__ delayed();
  1660 	__ move(FSR, R0);
  1661 	*/
  1662 	__ trunc_l_s(F12, FSF);
  1663 	__ cfc1(AT, 31);
  1664 	__ li(T0, 0x10000);
  1665 	__ andr(AT, AT, T0);
  1666 	__ beq(AT, R0, L);
  1667 	__ delayed()->dmfc1(FSR, F12);
  1669 	__ mov_s(F12, FSF);
  1670 	__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
   1671 	__ bind(L);
   1672       }
  1673       break;
  1674     case Bytecodes::_f2d:
  1675       __ cvt_d_s(FSF, FSF);
  1676       break;
   1677     case Bytecodes::_d2i:
   1678       {
  1679 	Label L;
  1680 	/*
  1681 	__ c_un_d(FSF, FSF);		//NaN?
  1682 	__ bc1t(L);
  1683 	__ delayed(); __ move(FSR, R0);
  1684 	*/
  1685 	__ trunc_w_d(F12, FSF);
  1686 	__ cfc1(AT, 31);
  1687 	__ li(T0, 0x10000);
  1688 	__ andr(AT, AT, T0);
  1689 	__ beq(AT, R0, L);
  1690 	__ delayed()->mfc1(FSR, F12);
  1692 	__ mov_d(F12, FSF);
  1693 	__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
   1694 	__ bind(L);
   1695       }
  1696       break;
   1697     case Bytecodes::_d2l:
   1698       {
  1699 	Label L;
  1700 	/*
  1701 	__ move(SSR, R0);
  1702 	__ c_un_d(FSF, FSF);		//NaN?
  1703 	__ bc1t(L);
  1704 	__ delayed(); __ move(FSR, R0);
  1705 	*/
  1706 	__ trunc_l_d(F12, FSF);
  1707 	__ cfc1(AT, 31);
  1708 	__ li(T0, 0x10000);
  1709 	__ andr(AT, AT, T0);
  1710 	__ beq(AT, R0, L);
  1711 	__ delayed()->dmfc1(FSR, F12);
  1713 	__ mov_d(F12, FSF);
  1714 	__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
   1715 	__ bind(L);
   1716       }
  1717       break;
  1718     case Bytecodes::_d2f:
  1719       __ cvt_s_d(FSF, FSF);
  1720       break;
  1721     default             :
   1722       ShouldNotReachHere();
   1723   }
   1724 }
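The i2b/i2c/i2s cases above implement narrowing with shift pairs (or a mask) on a value kept sign-extended in a 64-bit register. Their effect, written out in plain C++ for reference:

    #include <cstdint>

    int64_t i2b_sketch(int64_t v) { return static_cast<int8_t>(v);   }  // dsll32/dsra32 by 24
    int64_t i2c_sketch(int64_t v) { return static_cast<uint16_t>(v); }  // andi 0xFFFF
    int64_t i2s_sketch(int64_t v) { return static_cast<int16_t>(v);  }  // dsll32/dsra32 by 16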
  1726 void TemplateTable::lcmp() {
  1727   transition(ltos, itos);
  1729   Label low, high, done;
  1730   __ pop(T0);
  1731   __ pop(R0);
  1732   __ slt(AT, T0, FSR);
  1733   __ bne(AT, R0, low);
  1734   __ delayed()->nop();
  1736   __ bne(T0, FSR, high);
  1737   __ delayed()->nop();
  1739   __ li(FSR, (long)0);
  1740   __ b(done);
  1741   __ delayed()->nop();
  1743   __ bind(low);
  1744   __ li(FSR, (long)-1);
  1745   __ b(done);
  1746   __ delayed()->nop();
  1748   __ bind(high);
  1749   __ li(FSR, (long)1);
  1750   __ b(done);
  1751   __ delayed()->nop();
   1753   __ bind(done);
   1754 }
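lcmp leaves -1, 0 or 1 in FSR depending on how the two longs compare; the three-branch ladder above is equivalent to the usual branch-free formulation:

    #include <cstdint>

    static int lcmp_sketch(int64_t a, int64_t b) {
      return (a > b) - (a < b);  // 1, 0 or -1
    }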
  1756 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  1757 	Label less, done;
  1759 	__ move(FSR, R0);
  1761 	if (is_float) {
  1762 		__ pop_ftos_to_esp();
  1763 		__ lwc1(FTF, at_sp());
  1764 		__ c_eq_s(FTF, FSF);
  1765 		__ bc1t(done);
  1766 		__ delayed()->daddi(SP, SP, 1 * wordSize);
  1768 		if (unordered_result<0)
  1769 			__ c_ult_s(FTF, FSF);
  1770 		else
  1771 			__ c_olt_s(FTF, FSF);
  1772 	} else {
  1773 		__ pop_dtos_to_esp();
  1774 		__ ldc1(FTF, at_sp());
  1775 		__ c_eq_d(FTF, FSF);
  1776 		__ bc1t(done);
  1777 		__ delayed()->daddi(SP, SP, 2 * wordSize);
  1779 		if (unordered_result<0)
  1780 			__ c_ult_d(FTF, FSF);
  1781 		else
   1782 			__ c_olt_d(FTF, FSF);
   1783 	}
  1784 	__ bc1t(less);
  1785 	__ delayed()->nop();
  1786 	__ move(FSR, 1);
  1787 	__ b(done);
  1788 	__ delayed()->nop();
  1789 	__ bind(less);
  1790 	__ move(FSR, -1);
   1791 	__ bind(done);
   1792 }
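unordered_result distinguishes fcmpl/dcmpl (push -1 when either operand is NaN) from fcmpg/dcmpg (push +1), which is why the code selects c_ult (unordered-or-less) versus c_olt (ordered-less). Sketch of the semantics:

    #include <cmath>

    static int fcmp_sketch(double a, double b, int unordered_result) {
      if (std::isnan(a) || std::isnan(b)) return unordered_result;  // -1 for *cmpl, +1 for *cmpg
      return (a > b) - (a < b);
    }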
  1795 // used registers : T3, A7, Rnext
  1796 // FSR : return bci, this is defined by the vm specification
  1797 // T2 : MDO taken count
  1798 // T3 : method
  1799 // A7 : offset
  1800 // Rnext : next bytecode, this is required by dispatch_base
  1801 void TemplateTable::branch(bool is_jsr, bool is_wide) {
  1802   __ get_method(T3);
  1803   __ profile_taken_branch(A7, T2);		// only C2 meaningful 
  1805 #ifndef CORE
  1806   const ByteSize be_offset = MethodCounters::backedge_counter_offset() 
  1807     + InvocationCounter::counter_offset();
  1808   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() 
  1809     + InvocationCounter::counter_offset();
  1810   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
  1811 #endif // CORE
   1813   // Load up A7 with the branch displacement
  1814   if (!is_wide) {
  1815     __ load_two_bytes_from_at_bcp(A7, AT, 1);
  1816     __ hswap(A7);
  1817   } else {
  1818     __ lw(A7, at_bcp(1));
   1819     __ swap(A7);
   1820   }
  1822   // Handle all the JSR stuff here, then exit.
  1823   // It's much shorter and cleaner than intermingling with the
   1824   // non-JSR normal-branch stuff occurring below.
  1825   if (is_jsr) {
  1826     // Pre-load the next target bytecode into Rnext
  1827     __ dadd(AT, BCP, A7);
  1828     __ lbu(Rnext, AT, 0);
  1830     // compute return address as bci in FSR
  1831     __ daddi(FSR, BCP, (is_wide?5:3) - in_bytes(ConstMethod::codes_offset()));
  1832     __ ld(AT, T3, in_bytes(Method::const_offset()));
  1833     __ dsub(FSR, FSR, AT);
  1834     // Adjust the bcp in BCP by the displacement in A7
  1835     __ dadd(BCP, BCP, A7);
  1836     // jsr returns atos that is not an oop
  1837     // __ dispatch_only_noverify(atos);
  1838     // Push return address
  1839     __ push_i(FSR);
  1840     // jsr returns vtos
  1841     __ dispatch_only_noverify(vtos);
   1843     return;
   1844   }
  1846   // Normal (non-jsr) branch handling
   1848   // Adjust the bcp in BCP (S0) by the displacement in A7
  1849   __ dadd(BCP, BCP, A7);
  1851 #ifdef CORE
  1852   // Pre-load the next target bytecode into EBX
  1853   __ lbu(Rnext, BCP, 0);
  1854   // continue with the bytecode @ target
  1855   __ dispatch_only(vtos);
  1856 #else
  1857   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  1858   Label backedge_counter_overflow;
  1859   Label profile_method;
  1860   Label dispatch;
  1861   if (UseLoopCounter) {
  1862     // increment backedge counter for backward branches
  1863     // T2: MDO bumped taken-count
  1865     // T3: method
  1866     // A7: target offset
  1867     // BCP: target bcp
  1868     // LVP: locals pointer
  1869     __ bgtz(A7, dispatch);	// check if forward or backward branch
  1870     __ delayed()->nop();
  1872     // check if MethodCounters exists
  1873     Label has_counters;
  1874     __ ld(AT, T3, in_bytes(Method::method_counters_offset()));  // AT = MethodCounters* (may be NULL)
  1875     __ bne(AT, R0, has_counters);
  1876     __ nop();
  1877     //__ push(T3);
  1878     //__ push(A7);
  1879     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
  1880                T3);
  1881     //__ pop(A7);
  1882     //__ pop(T3);
  1883     __ ld(AT, T3, in_bytes(Method::method_counters_offset()));  // AT = MethodCounters* (may be NULL)
  1884     __ beq(AT, R0, dispatch);
  1885     __ nop();
  1886     __ bind(has_counters);
  1888     // increment back edge counter 
  1889     __ ld(T1, T3, in_bytes(Method::method_counters_offset()));
  1890     __ lw(T0, T1, in_bytes(be_offset));
  1891     __ increment(T0, InvocationCounter::count_increment);
  1892     __ sw(T0, T1, in_bytes(be_offset));
  1894     // load invocation counter
  1895     __ lw(T1, T1, in_bytes(inv_offset));
  1896     // buffer bit added, so no mask is needed
  1897     // by yjl 10/24/2005
  1898     //__ move(AT, InvocationCounter::count_mask_value);
  1899     //__ andr(T1, T1, AT);
  1901     // add the backedge counter and the invocation counter
  1902     __ dadd(T1, T1, T0);
  1904     if (ProfileInterpreter) {
  1905       // Test to see if we should create a method data oop
  1906       //__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterProfileLimit)));
  1907       //__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterProfileLimit)));
  1908       // T1 : backedge counter & invocation counter
  1909       __ li(AT, (long)&InvocationCounter::InterpreterProfileLimit);
  1910       __ lw(AT, AT, 0);
  1911       __ slt(AT, T1, AT);
  1912       __ bne(AT, R0, dispatch);
  1913       __ delayed()->nop();
  1915       // if no method data exists, go to profile method
  1916       __ test_method_data_pointer(T1, profile_method);
  1918       if (UseOnStackReplacement) {
  1919 	// check for overflow against T2, which is the MDO taken count
  1920 	//__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
  1921 	//__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
  1922 	__ li(AT, (long)&InvocationCounter::InterpreterBackwardBranchLimit);
  1923 	__ lw(AT, AT, 0);
  1924 	// the value in T2 was set by profile_taken_branch at the beginning
  1925 	__ slt(AT, T2, AT);
  1926 	__ bne(AT, R0, dispatch);
  1927 	__ delayed()->nop();
  1929 	// When ProfileInterpreter is on, the backedge_count comes 
  1930 	// from the methodDataOop, whose value does not get reset on 
  1931 	// the call to frequency_counter_overflow().  
  1932 	// To avoid excessive calls to the overflow routine while 
  1933 	// the method is being compiled, add a second test to make 
  1934 	// sure the overflow function is called only once every 
  1935 	// overflow_frequency.
  1936 	const int overflow_frequency = 1024;
  1937 	__ andi(AT, T2, overflow_frequency-1);
  1938 	__ beq(AT, R0, backedge_counter_overflow);
  1939 	__ delayed()->nop();
  1941     } else {
  1942       if (UseOnStackReplacement) {
  1943 	// check for overflow against T1, which is the sum of the counters
  1944 	//__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
  1945 	//__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
  1946 	__ li(AT, (long)&InvocationCounter::InterpreterBackwardBranchLimit);
  1947 	__ lw(AT, AT, 0);
  1948 	__ slt(AT, T1, AT);
  1949 	__ beq(AT, R0, backedge_counter_overflow);
  1950 	__ delayed()->nop();
  1953     __ bind(dispatch);
  1956   // Pre-load the next target bytecode into Rnext
  1957   __ lbu(Rnext, BCP, 0);
  1959   // continue with the bytecode @ target
  1960   // FSR: return bci for jsr's, unused otherwise
  1961   // Rnext: target bytecode
  1962   // BCP: target bcp
  1963   __ dispatch_only(vtos);
  1965   if (UseLoopCounter) {
  1966     if (ProfileInterpreter) {
  1967       // Out-of-line code to allocate method data oop.
  1968       __ bind(profile_method);
  1969       __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  1970       __ lbu(Rnext, BCP, 0);
  1972       __ set_method_data_pointer_for_bcp();
  1973 /*
  1974       __ ld(T3, FP, method_offset);
  1975       __ lw(T3, T3, in_bytes(Method::method_data_offset()));
  1976       __ sw(T3, FP, frame::interpreter_frame_mdx_offset * wordSize);
  1977       __ test_method_data_pointer(T3, dispatch);
  1978       // offset non-null mdp by MDO::data_offset() + IR::profile_method()
  1979       __ daddi(T3, T3, in_bytes(MethodData::data_offset()));
  1980       __ dadd(T3, T3, T1);
  1981       __ sw(T3, FP, frame::interpreter_frame_mdx_offset * wordSize);
  1982 */
  1983       __ b(dispatch);
  1984       __ delayed()->nop();
  1987     if (UseOnStackReplacement) {
  1988       // invocation counter overflow
  1989       __ bind(backedge_counter_overflow);
  1990       __ sub(A7, BCP, A7);	// branch bcp
  1991       call_VM(NOREG, CAST_FROM_FN_PTR(address, 
  1992 	    InterpreterRuntime::frequency_counter_overflow), A7);
  1993       __ lbu(Rnext, BCP, 0);
  1995       // V0: osr nmethod (osr ok) or NULL (osr not possible)
  1996       // V1: osr adapter frame return address
  1997       // Rnext: target bytecode
  1998       // LVP: locals pointer
  1999       // BCP: bcp
  2000       __ beq(V0, R0, dispatch);
  2001       __ delayed()->nop();
  2002       // nmethod may have been invalidated (VM may block upon call_VM return)
  2003       __ lw(T3, V0, nmethod::entry_bci_offset());
  2004       __ move(AT, InvalidOSREntryBci);
  2005       __ beq(AT, T3, dispatch);
  2006       __ delayed()->nop();
  2007       // We need to prepare to execute the OSR method. First we must
  2008       // migrate the locals and monitors off of the stack.
  2009       //eax V0: osr nmethod (osr ok) or NULL (osr not possible)
  2010       //ebx V1: osr adapter frame return address
  2011       //edx  Rnext: target bytecode
  2012       //edi  LVP: locals pointer
  2013       //esi  BCP: bcp
  2014       __ move(BCP, V0); 
  2015       // const Register thread = ecx;
  2016       const Register thread = TREG;
  2017 #ifndef OPT_THREAD
  2018       __ get_thread(thread);
  2019 #endif
  2020       call_VM(noreg, CAST_FROM_FN_PTR(address, 
  2021 	    SharedRuntime::OSR_migration_begin));
  2022       // V0 is the OSR buffer; move it to the expected parameter location (T0)
  2023       //refer to osrBufferPointer in c1_LIRAssembler_mips.cpp	
  2024       __ move(T0, V0);
  2026       // pop the interpreter frame
  2027       //  __ movl(edx, Address(ebp, frame::interpreter_frame_sender_sp_offset 
  2028       //  * wordSize)); // get sender sp
  2029       __ ld(A7, Address(FP, 
  2030 	    frame::interpreter_frame_sender_sp_offset * wordSize)); 
  2031       //FIXME, shall we keep the return address on the stack?	
  2032       __ leave();                                // remove frame anchor
  2033       // __ popl(edi);                         // get return address
  2034       //__ daddi(SP, SP, wordSize);               // get return address
  2035       //   __ pop(LVP);	
  2036       __ move(LVP, RA);	
  2037       // __ movl(esp, edx);                         // set sp to sender sp
  2038       __ move(SP, A7);
  2040       Label skip;
  2041       Label chkint;
  2043       // The interpreter frame we have removed may be returning to
  2044       // either the callstub or the interpreter. Since we will
  2045       // now be returning from a compiled (OSR) nmethod we must
  2046       // adjust the return address to one that can handle compiled
  2047       // results and clean the fpu stack. This is very similar to
  2048       // what an i2c adapter must do.
  2050       // Are we returning to the call stub?
  2051 #if 0	
  2052       // __ cmpl(edi, (int)StubRoutines::_call_stub_return_address);
  2053       __ daddi(AT, LVP, -(int)StubRoutines::_call_stub_return_address); 
  2054       //  __ jcc(Assembler::notEqual, chkint);
  2055       __ bne(AT, R0, chkint);
  2056       __ delayed()->nop();      
  2057       // yes adjust to the specialized call stub  return.
  2058       // assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL,
  2059       // "must be set");
  2060       assert(StubRoutines::gs2::get_call_stub_compiled_return() != NULL, 
  2061 	  "must be set");
  2062       // __ movl(edi, (intptr_t) StubRoutines::i486::get_call_stub_compiled_return());
  2063       __ move(LVP, (intptr_t) StubRoutines::gs2::get_call_stub_compiled_return()); 
  2064       //  __ jmp(skip);
  2065       __ b(skip);
  2066       __ delayed()->nop();
  2067       __ bind(chkint);
  2069       // Are we returning to the interpreter? Look for sentinel
  2071       //__ cmpl(Address(edi, -8), Interpreter::return_sentinel);
  2072       __ lw(AT, LVP , -8); 
  2073       __ daddi(AT, AT, -Interpreter::return_sentinel); 
  2074       //__ jcc(Assembler::notEqual, skip);
  2075       __ bne(AT, R0, skip);
  2076       __ delayed()->nop(); 
  2077       // Adjust to compiled return back to interpreter
  2079       // __ movl(edi, Address(edi, -4));
  2080       __ lw(LVP, LVP, -4); 
  2082       __ bind(skip);
  2083 #endif
  2084       // Align stack pointer for compiled code (note that caller is
  2085       // responsible for undoing this fixup by remembering the old SP
  2086       // in an ebp-relative location)
  2087       //  __ andl(esp, -(StackAlignmentInBytes));
  2088       __ move(AT, -(StackAlignmentInBytes));	
  2089       __ andr(SP , SP , AT);
  2090       // push the (possibly adjusted) return address
  2091       //  __ pushl(edi);
  2092       //__ push(LVP);
  2093       //			__ move(RA, LVP);	
  2094       // and begin the OSR nmethod
  2095       //  __ jmp(Address(esi, nmethod::osr_entry_point_offset()));
  2096       //refer to osr_entry in c1_LIRAssembler_mips.cpp	
  2097       __ ld(AT, BCP, nmethod::osr_entry_point_offset()); 
  2098       __ jr(AT); 
  2099       __ delayed()->nop(); 
  2102 #endif // not CORE
  2105 void TemplateTable::if_0cmp(Condition cc) {
  2106   transition(itos, vtos);
  2107   // assume branch is more often taken than not (loops use backward branches)
  2108   Label not_taken;
  2109   switch(cc) {
  2110     case not_equal:
  2111       __ beq(FSR, R0, not_taken);
  2112       break;
  2113     case equal:
  2114       __ bne(FSR, R0, not_taken);
  2115       break;
  2116     case less:
  2117       __ bgez(FSR, not_taken);
  2118       break;
  2119     case less_equal:
  2120       __ bgtz(FSR, not_taken);
  2121       break;
  2122     case greater:
  2123       __ blez(FSR, not_taken);
  2124       break;
  2125     case greater_equal:
  2126       __ bltz(FSR, not_taken);
  2127       break;
  2129   __ delayed()->nop();
  2131   branch(false, false);
  2133   __ bind(not_taken);
  2134   __ profile_not_taken_branch(FSR);
  2138 void TemplateTable::if_icmp(Condition cc) {
  2139   transition(itos, vtos);
  2140   // assume branch is more often taken than not (loops use backward branches)
  2141   Label not_taken;
  2143   __ pop_i(SSR);	
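         // FSR holds the topmost operand (value2) and the pop above puts the one
         // below it (value1) into SSR, so each case below evaluates "value1 cc value2",
         // i.e. SSR cc FSR, and falls through to not_taken when the condition fails.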
  2144   switch(cc) {
  2145     case not_equal:
  2146       __ beq(SSR, FSR, not_taken);
  2147       break;
  2148     case equal:
  2149       __ bne(SSR, FSR, not_taken);
  2150       break;
  2151     case less:
  2152       __ slt(AT, SSR, FSR);
  2153       __ beq(AT, R0, not_taken);
  2154       break;
  2155     case less_equal:
  2156       __ slt(AT, FSR, SSR);
  2157       __ bne(AT, R0, not_taken);
  2158       break;
  2159     case greater:
  2160       __ slt(AT, FSR, SSR);
  2161       __ beq(AT, R0, not_taken);
  2162       break;
  2163     case greater_equal:
  2164       __ slt(AT, SSR, FSR);
  2165       __ bne(AT, R0, not_taken);
  2166       break;
  2168   __ delayed()->nop();
  2170   branch(false, false);
  2172   __ bind(not_taken);
  2173   __ profile_not_taken_branch(FSR);
  2177 void TemplateTable::if_nullcmp(Condition cc) {
  2178   transition(atos, vtos);
  2179   // assume branch is more often taken than not (loops use backward branches)
  2180   Label not_taken;
  2181   switch(cc) {
  2182     case not_equal:
  2183       __ beq(FSR, R0, not_taken);
  2184       break;
  2185     case equal:
  2186       __ bne(FSR, R0, not_taken);
  2187       break;
  2188     default:
  2189       ShouldNotReachHere();
  2191   __ delayed()->nop();
  2193   branch(false, false);
  2195   __ bind(not_taken);
  2196   __ profile_not_taken_branch(FSR);
  2200 void TemplateTable::if_acmp(Condition cc) {
  2201 	transition(atos, vtos);
  2202 	// assume branch is more often taken than not (loops use backward branches)
  2203 	Label not_taken;
  2204 	//	__ lw(SSR, SP, 0);
  2205 	__ pop_ptr(SSR);
  2206 	switch(cc) {
  2207 		case not_equal:
  2208 			__ beq(SSR, FSR, not_taken);
  2209 			break;
  2210 		case equal:
  2211 			__ bne(SSR, FSR, not_taken);
  2212 			break;
  2213 		default:
  2214 			ShouldNotReachHere();
  2216 	//	__ delayed()->daddi(SP, SP, 4);
  2217 	__ delayed()->nop();
  2219 	branch(false, false);
  2221 	__ bind(not_taken);
  2222 	__ profile_not_taken_branch(FSR);
  2225 // used registers : T1, T2, T3
  2226 // T1 : method
  2227 // T2 : return bci
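// The bci saved by jsr is reloaded from the local and rebased onto this method's
// bytecodes: BCP = ConstMethod* + codes_offset() + bci, then execution resumes there.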
  2228 void TemplateTable::ret() {
  2229 	transition(vtos, vtos);
  2231 	locals_index(T2);
  2232 	__ ld(T2, T2, 0);
  2233 	__ profile_ret(T2, T3);
  2235 	__ get_method(T1);
  2236 	__ ld(BCP, T1, in_bytes(Method::const_offset()));
  2237 	__ dadd(BCP, BCP, T2);
  2238 	__ daddi(BCP, BCP, in_bytes(ConstMethod::codes_offset()));
  2240 	__ dispatch_next(vtos);
  2243 // used registers : T1, T2, T3
  2244 // T1 : method
  2245 // T2 : return bci
  2246 void TemplateTable::wide_ret() {
  2247 	transition(vtos, vtos);
  2249 	locals_index_wide(T2);
  2250 	__ ld(T2, T2, 0);                   // get return bci, compute return bcp
  2251 	__ profile_ret(T2, T3);
  2253 	__ get_method(T1);
  2254 	__ ld(BCP, T1, in_bytes(Method::const_offset()));
  2255 	__ dadd(BCP, BCP, T2);
  2256 	__ daddi(BCP, BCP, in_bytes(ConstMethod::codes_offset()));
  2258 	__ dispatch_next(vtos);
  2261 // used register T2, T3, A7, Rnext
  2262 // T2 : bytecode pointer
  2263 // T3 : low
  2264 // A7 : high
  2265 // Rnext : dest bytecode, required by dispatch_base
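// tableswitch data layout (after padding BCP+1 up to the next 4-byte boundary,
// all values 32-bit big-endian): default offset, low, high, then (high - low + 1)
// jump offsets. T2 below points at the aligned default-offset word.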
  2266 void TemplateTable::tableswitch() {
  2267 	Label default_case, continue_execution;
  2268 	transition(itos, vtos);
  2270 	// align BCP
  2271 	__ daddi(T2, BCP, BytesPerInt);
  2272 	__ li(AT, -BytesPerInt);
  2273 	__ andr(T2, T2, AT);
  2275 	// load lo & hi
  2276 	__ lw(T3, T2, 1 * BytesPerInt);
  2277 	__ swap(T3);
  2278 	__ lw(A7, T2, 2 * BytesPerInt);
  2279 	__ swap(A7);
  2281 	// check against lo & hi
  2282 	__ slt(AT, FSR, T3);
  2283 	__ bne(AT, R0, default_case);
  2284 	__ delayed()->nop();
  2286 	__ slt(AT, A7, FSR);
  2287 	__ bne(AT, R0, default_case);
  2288 	__ delayed()->nop();
  2290 	// lookup dispatch offset, in A7 big endian
  2291 	__ dsub(FSR, FSR, T3);
  2292 	__ dsll(AT, FSR, Address::times_4);
  2293 	__ dadd(AT, T2, AT);
  2294 	__ lw(A7, AT, 3 * BytesPerInt);
  2295 	__ profile_switch_case(FSR, T9, T3);
  2297 	__ bind(continue_execution);
  2298 	__ swap(A7);
  2299 	__ dadd(BCP, BCP, A7);
  2300 	__ lbu(Rnext, BCP, 0);
  2301 	__ dispatch_only(vtos);
  2303 	// handle default
  2304 	__ bind(default_case);
  2305 	__ profile_switch_default(FSR);
  2306 	__ lw(A7, T2, 0);
  2307 	__ b(continue_execution);
  2308 	__ delayed()->nop();
  2311 void TemplateTable::lookupswitch() {
  2312 	transition(itos, itos);
  2313 	__ stop("lookupswitch bytecode should have been rewritten");
  2316 // used registers : T2, T3, A7, Rnext
  2317 // T2 : bytecode pointer
  2318 // T3 : pair index
  2319 // A7 : offset
  2320 // Rnext : dest bytecode
  2321 // the data after the opcode is the same as lookupswitch
  2322 // see Rewriter::rewrite_method for more information
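// lookupswitch data layout (after padding to a 4-byte boundary, all values 32-bit
// big-endian): default offset, npairs, then npairs (match, offset) pairs.
// The loop below scans the pairs linearly; T2 points at the default-offset word.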
  2323 void TemplateTable::fast_linearswitch() {
  2324   transition(itos, vtos);
  2325   Label loop_entry, loop, found, continue_execution;  
  2327   // swap FSR so we can avoid swapping the table entries
  2328   __ swap(FSR);
  2330   // align BCP
  2331   __ daddi(T2, BCP, BytesPerInt);
  2332   __ li(AT, -BytesPerInt);
  2333   __ andr(T2, T2, AT);
  2335   // set counter
  2336   __ lw(T3, T2, BytesPerInt);
  2337   __ swap(T3);
  2338   __ b(loop_entry);
  2339   __ delayed()->nop();
  2341   // table search
  2342   __ bind(loop);
  2343   // get the entry value
  2344   __ dsll(AT, T3, Address::times_8);
  2345   __ dadd(AT, T2, AT);
  2346   __ lw(AT, AT, 2 * BytesPerInt);
  2348   // found?
  2349   __ beq(FSR, AT, found);
  2350   __ delayed()->nop();
  2352   __ bind(loop_entry);
  2353   __ bgtz(T3, loop);
  2354   __ delayed()->daddiu(T3, T3, -1);
  2356   // default case
  2357   __ profile_switch_default(FSR);
  2358   __ lw(A7, T2, 0);
  2359   __ b(continue_execution);
  2360   __ delayed()->nop();
  2362   // entry found -> get offset
  2363   __ bind(found);
  2364   __ dsll(AT, T3, Address::times_8);
  2365   __ dadd(AT, T2, AT);
  2366   __ lw(A7, AT, 3 * BytesPerInt);
  2367   __ profile_switch_case(T3, FSR, T2);
  2369   // continue execution
  2370   __ bind(continue_execution);  
  2371   __ swap(A7);
  2372   __ dadd(BCP, BCP, A7);
  2373   __ lbu(Rnext, BCP, 0);
  2374   __ dispatch_only(vtos);
  2377 // used registers : T0, T1, T2, T3, A7, Rnext
  2378 // T2 : pairs address(array)
  2379 // Rnext : dest bytecode
  2380 // the data after the opcode is the same as lookupswitch
  2381 // see Rewriter::rewrite_method for more information
  2382 void TemplateTable::fast_binaryswitch() {
  2383   transition(itos, vtos);
  2384   // Implementation using the following core algorithm:
  2385   //
  2386   // int binary_search(int key, LookupswitchPair* array, int n) {
  2387   //   // Binary search according to "Methodik des Programmierens" by
  2388   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  2389   //   int i = 0;
  2390   //   int j = n;
  2391   //   while (i+1 < j) {
  2392   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  2393   //     // with      Q: for all i: 0 <= i < n: key < a[i]
  2394   //     // where a stands for the array and assuming that the (nonexistent)
  2395   //     // element a[n] is infinitely big.
  2396   //     int h = (i + j) >> 1;
  2397   //     // i < h < j
  2398   //     if (key < array[h].fast_match()) {
  2399   //       j = h;
  2400   //     } else {
  2401   //       i = h;
  2402   //     }
  2403   //   }
  2404   //   // R: a[i] <= key < a[i+1] or Q
  2405   //   // (i.e., if key is within array, i is the correct index)
  2406   //   return i;
  2407   // }
  2409   // register allocation
  2410   const Register array = T2;
  2411   const Register i = T3, j = A7;
  2412   const Register h = T1;
  2413   const Register temp = T0;
  2414   const Register key = FSR;
  2416   // setup array
  2417   __ daddi(array, BCP, 3*BytesPerInt);
  2418   __ li(AT, -BytesPerInt);
  2419   __ andr(array, array, AT);
  2421   // initialize i & j
  2422   __ move(i, R0);
  2423   __ lw(j, array, - 1 * BytesPerInt);
  2424   // Convert j into native byteordering  
  2425   __ swap(j);
  2427   // and start
  2428   Label entry;
  2429   __ b(entry);
  2430   __ delayed()->nop();
  2432   // binary search loop
  2434     Label loop;
  2435     __ bind(loop);
  2436     // int h = (i + j) >> 1;
  2437     __ dadd(h, i, j);
  2438     __ dsrl(h, h, 1);
  2439     // if (key < array[h].fast_match()) {
  2440     //   j = h;
  2441     // } else {
  2442     //   i = h;
  2443     // }
  2444     // Convert array[h].match to native byte-ordering before compare
  2445     __ dsll(AT, h, Address::times_8);
  2446     __ dadd(AT, array, AT);
  2447     __ lw(temp, AT, 0 * BytesPerInt);
  2448     __ swap(temp);
  2451       Label set_i, end_of_if;
  2452       __ slt(AT, key, temp);
  2453       __ beq(AT, R0, set_i);
  2454       __ delayed()->nop(); 
  2456       __ b(end_of_if);
  2457       __ delayed(); __ move(j, h);
  2459       __ bind(set_i);
  2460       __ move(i, h);
  2462       __ bind(end_of_if);
  2464     // while (i+1 < j)
  2465     __ bind(entry);
  2466     __ daddi(h, i, 1);
  2467     __ slt(AT, h, j);
  2468     __ bne(AT, R0, loop);
  2469     __ delayed()->nop();
  2472   // end of binary search, result index is i (must check again!)
  2473   Label default_case;
  2474   // Convert array[i].match to native byte-ordering before compare
  2475   __ dsll(AT, i, Address::times_8);
  2476   __ dadd(AT, array, AT);
  2477   __ lw(temp, AT, 0 * BytesPerInt);
  2478   __ swap(temp);
  2479   __ bne(key, temp, default_case);
  2480   __ delayed()->nop();
  2482   // entry found -> j = offset
  2483   __ dsll(AT, i, Address::times_8);
  2484   __ dadd(AT, array, AT);
  2485   __ lw(j, AT, 1 * BytesPerInt);
  2486   __ profile_switch_case(i, key, array);
  2487   __ swap(j);
  2489   __ dadd(BCP, BCP, j);
  2490   __ lbu(Rnext, BCP, 0);
  2491   __ dispatch_only(vtos);
  2493   // default case -> j = default offset
  2494   __ bind(default_case);
  2495   __ profile_switch_default(i);
  2496   __ lw(j, array, - 2 * BytesPerInt);
  2497   __ swap(j);
  2498   __ dadd(BCP, BCP, j);
  2499   __ lbu(Rnext, BCP, 0);
  2500   __ dispatch_only(vtos);
  2503 void TemplateTable::_return(TosState state) {
  2504   transition(state, state);
  2505   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
  2506   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
  2507     assert(state == vtos, "only valid state");
  2508     __ ld(T1, aaddress(0));
  2509     //__ ld(LVP, T1, oopDesc::klass_offset_in_bytes());
  2510     __ load_klass(LVP, T1);
  2511     __ lw(LVP, LVP, in_bytes(Klass::access_flags_offset()));
  2512     __ move(AT, JVM_ACC_HAS_FINALIZER); 
  2513     __ andr(AT, AT, LVP);//by_css
  2514     Label skip_register_finalizer;
  2515     __ beq(AT, R0, skip_register_finalizer);
  2516     __ delayed()->nop(); 
  2517     __ call_VM(noreg, CAST_FROM_FN_PTR(address, 
  2518 	  InterpreterRuntime::register_finalizer), T1);
  2519     __ bind(skip_register_finalizer);
  2521   __ remove_activation(state, T9);
  2523   __ jr(T9);
  2524   __ delayed()->nop();
  2527 // ----------------------------------------------------------------------------
  2528 // Volatile variables demand their effects be made known to all CPU's
  2529 // in order.  Store buffers on most chips allow reads & writes to
  2530 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
  2531 // without some kind of memory barrier (i.e., it's not sufficient that
  2532 // the interpreter does not reorder volatile references, the hardware
  2533 // also must not reorder them).
  2534 //
  2535 // According to the new Java Memory Model (JMM):
  2536 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
  2537 //     writes act as acquire & release, so:
  2538 // (2) A read cannot let unrelated NON-volatile memory refs that
  2539 //     happen after the read float up to before the read.  It's OK for
  2540 //     non-volatile memory refs that happen before the volatile read to
  2541 //     float down below it.
  2542 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
  2543 //     memory refs that happen BEFORE the write float down to after the
  2544 //     write.  It's OK for non-volatile memory refs that happen after the
  2545 //     volatile write to float up before it.
  2546 //
  2547 // We only put in barriers around volatile refs (they are expensive),
  2548 // not _between_ memory refs (that would require us to track the
  2549 // flavor of the previous memory refs).  Requirements (2) and (3)
  2550 // require some barriers before volatile stores and after volatile
  2551 // loads.  These nearly cover requirement (1) but miss the
  2552 // volatile-store-volatile-load case.  This final case is placed after
  2553 // volatile-stores although it could just as well go before
  2554 // volatile-loads.
  2555 //void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
  2556 //                                     order_constraint) {
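// For example, putfield_or_static below performs the store and then, if the field
// was volatile, calls volatile_barrier(), which on an MP system emits a MIPS 'sync'
// so the store is ordered before any subsequent volatile load.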
  2557 void TemplateTable::volatile_barrier( ) {
  2558   // Helper function to insert a is-volatile test and memory barrier
  2559   //if (os::is_MP()) { // Not needed on single CPU
  2560   //  __ membar(order_constraint);
  2561   //}
  2562 	if( !os::is_MP() ) return;	// Not needed on single CPU
  2563 	__ sync();
  2566 // we don't shift the index left by 2 bits in get_cache_and_index_at_bcp,
  2567 // because we always shift it ourselves before using it. A ConstantPoolCacheEntry 
  2568 // is 16 bytes long and index is the index into the 
  2569 // ConstantPoolCache, so cache + base_offset() + index * 16 is 
  2570 // the address of the corresponding ConstantPoolCacheEntry
  2571 // used registers : T2
  2572 // NOTE : the returned index must still be shifted left by 4 to form the address!
  2573 void TemplateTable::resolve_cache_and_index(int byte_no,
  2574                                             Register Rcache,
  2575 					    Register index,
  2576                                             size_t index_size) {
  2577   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  2578   const Register temp = A1;
  2579   assert_different_registers(Rcache, index);
  2580   const int shift_count = (1 + byte_no)*BitsPerByte;
  2581   Label resolved;
  2582   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
  2583   // is resolved?
  2584   int i = (int)bytecode();
  2585   __ addi(temp, temp, -i);
  2586   __ beq(temp, R0, resolved);
  2587   __ delayed()->nop();
  2588   // resolve first time through
  2589   address entry;
  2590   switch (bytecode()) {
  2591     case Bytecodes::_getstatic      : // fall through
  2592     case Bytecodes::_putstatic      : // fall through
  2593     case Bytecodes::_getfield       : // fall through
  2594     case Bytecodes::_putfield       : 
  2595       entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); 
  2596       break;
  2597     case Bytecodes::_invokevirtual  : // fall through
  2598     case Bytecodes::_invokespecial  : // fall through
  2599     case Bytecodes::_invokestatic   : // fall through
  2600     case Bytecodes::_invokeinterface: 
  2601       entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  
  2602       break;
  2603     case Bytecodes::_invokehandle:
  2604       entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
  2605       break;
  2606     case Bytecodes::_invokedynamic:
  2607       entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
  2608       break;
  2609     default                      		: 
  2610       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
  2613   __ move(temp, i);
  2614   __ call_VM(NOREG, entry, temp);
  2616   // Update registers with resolved info
  2617   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  2618   __ bind(resolved);
  2621 // The Rcache and index registers must be set before call
  2622 void TemplateTable::load_field_cp_cache_entry(Register obj,
  2623                                               Register cache,
  2624                                               Register index,
  2625                                               Register off,
  2626                                               Register flags,
  2627                                               bool is_static = false) {
  2628   assert_different_registers(cache, index, flags, off);
  2629   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2630   // Field offset
  2631   __ dsll(AT, index, Address::times_ptr);
  2632   __ dadd(AT, cache, AT);
  2633   __ ld(off, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
  2634   // Flags    
  2635   __ ld(flags, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
  2637   // for static fields, the klass mirror overwrites the obj register
  2638   if (is_static) {
  2639     __ ld(obj, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())); 
  2640     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  2641     __ ld(obj, Address(obj, mirror_offset));
  2643     __ verify_oop(obj);	
  2647 // get the method, itable_index and flags of the current invoke
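// For byte_no == f2_byte (invokevirtual) the method/index is read from the entry's
// f2 field, otherwise from f1; when an itable index is requested it is always read
// from f2 (see index_offset below).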
  2648 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  2649                                                Register method,
  2650                                                Register itable_index,
  2651                                                Register flags,
  2652                                                bool is_invokevirtual,
  2653                                                bool is_invokevfinal, /*unused*/
  2654                                                bool is_invokedynamic) {
  2655   // setup registers
  2656   const Register cache = T3;
  2657   const Register index = T1;
  2658   assert_different_registers(method, flags);
  2659   assert_different_registers(method, cache, index);
  2660   assert_different_registers(itable_index, flags);
  2661   assert_different_registers(itable_index, cache, index);
  2662   assert(is_invokevirtual == (byte_no == f2_byte), "is invokevirtual flag redundant");
  2663   // determine constant pool cache field offsets
  2664   const int method_offset = in_bytes(
  2665       ConstantPoolCache::base_offset() +
  2666       ((byte_no == f2_byte)
  2667        ? ConstantPoolCacheEntry::f2_offset()
  2668        : ConstantPoolCacheEntry::f1_offset()
  2670       );
  2671   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
  2672       ConstantPoolCacheEntry::flags_offset());
  2673   // access constant pool cache fields
  2674   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
  2675       ConstantPoolCacheEntry::f2_offset());
  2676   size_t index_size = (is_invokedynamic ? sizeof(u4): sizeof(u2));
  2677   resolve_cache_and_index(byte_no, cache, index, index_size);
  2679   //assert(wordSize == 8, "adjust code below");
  2680   // note we shift by 4, not 2, because what we have is the true index 
  2681   // of the ConstantPoolCacheEntry, not the 2-bit pre-shifted index the x86 version uses
  2682   __ dsll(AT, index, Address::times_ptr);
  2683   __ dadd(AT, cache, AT);
  2684   __ ld(method, AT, method_offset);
  2687   if (itable_index != NOREG) {
  2688     __ ld(itable_index, AT, index_offset);
  2690   __ ld(flags, AT, flags_offset);
  2694 // The registers cache and index expected to be set before call.
  2695 // Correct values of the cache and index registers are preserved.
  2696 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
  2697                                             bool is_static, bool has_tos) {
  2698   // do the JVMTI work here to avoid disturbing the register state below
  2699   // We use c_rarg registers here because we want to use the register used in
  2700   // the call to the VM
  2701 	if (JvmtiExport::can_post_field_access()) {
  2702 		// Check to see if a field access watch has been set before we take
  2703 		// the time to call into the VM.
  2704 		Label L1;
  2705 		assert_different_registers(cache, index, FSR);
  2706 		__ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr());
  2707 		__ lw(FSR, AT, 0);
  2708 		__ beq(FSR, R0, L1);
  2709 		__ delayed()->nop();
  2711 		// We rely on the bytecode being resolved and the cpCache entry filled in.
  2712 		// cache entry pointer
  2713 		//__ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
  2714 		__ daddi(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  2715 		__ shl(index, 4);
  2716 		__ dadd(cache, cache, index);
  2717 		if (is_static) {
  2718 			__ move(FSR, R0);
  2719 		} else {
  2720 			__ lw(FSR, SP, 0);
  2721 			__ verify_oop(FSR);
  2723 		// FSR: object pointer or NULL
  2724 		// cache: cache entry pointer
  2725 		__ call_VM(NOREG, CAST_FROM_FN_PTR(address, 
  2726 					InterpreterRuntime::post_field_access), FSR, cache);
  2727 		__ get_cache_and_index_at_bcp(cache, index, 1);
  2728 		__ bind(L1);
  2732 void TemplateTable::pop_and_check_object(Register r) {
  2733   __ pop_ptr(r);
  2734   __ null_check(r);  // for field access must check obj.
  2735   __ verify_oop(r);
  2738 // used registers : T1, T2, T3, T1
  2739 // T1 : flags
  2740 // T2 : off
  2741 // T3 : obj
  2742 // T1 : field address
  2743 // The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the
  2744 // following mapping to the TosState states:
  2745 // btos: 0
  2746 // ctos: 1
  2747 // stos: 2
  2748 // itos: 3
  2749 // ltos: 4
  2750 // ftos: 5
  2751 // dtos: 6
  2752 // atos: 7
  2753 // vtos: 8
  2754 // see ConstantPoolCacheEntry::set_field for more info
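// Example (per the mapping above): an int field stores itos (3) in the tos_state
// bits, so (flags >> tos_state_shift) & 0xf == 3 and the dispatch chain below
// falls past notByte and takes the itos branch.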
  2755 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  2756   transition(vtos, vtos);
  2758   const Register cache = T3;
  2759   const Register index = T0;
  2761   const Register obj   = T3;
  2762   const Register off   = T2;
  2763   const Register flags = T1;
  2764   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  2765   //jvmti_post_field_access(cache, index, is_static, false);
  2767   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
  2769   if (!is_static) pop_and_check_object(obj);
  2770   __ dadd(index, obj, off);
  2773   Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
  2775   assert(btos == 0, "change code, btos != 0");
  2776   __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
  2777   __ andi(flags, flags, 0xf);
  2778   __ bne(flags, R0, notByte);
  2779   __ delayed()->nop();
  2781   // btos
  2782   __ lb(FSR, index, 0);	
  2783   __ sd(FSR, SP, - wordSize);
  2785   // Rewrite bytecode to be faster
  2786   if (!is_static) {
  2787     patch_bytecode(Bytecodes::_fast_bgetfield, T3, T2);
  2789   __ b(Done);
  2790   __ delayed()->daddi(SP, SP, - wordSize);
  2792   __ bind(notByte);
  2793   __ move(AT, itos);
  2794   __ bne(flags, AT, notInt);
  2795   __ delayed()->nop();
  2797   // itos
  2798   __ lw(FSR, index, 0);
  2799   __ sd(FSR, SP, - wordSize);
  2801   // Rewrite bytecode to be faster
  2802   if (!is_static) {
  2803     // patch_bytecode(Bytecodes::_fast_igetfield, T3, T2);
  2804     patch_bytecode(Bytecodes::_fast_igetfield, T3, T2);
  2806   __ b(Done);
  2807   __ delayed()->daddi(SP, SP, - wordSize);
  2809   __ bind(notInt);
  2810   __ move(AT, atos);
  2811   __ bne(flags, AT, notObj);
  2812   __ delayed()->nop();
  2814   // atos
  2815   //add for compressedoops
  2816   __ load_heap_oop(FSR, Address(index, 0));
  2817   __ sd(FSR, SP, - wordSize);
  2819   if (!is_static) {
  2820     //patch_bytecode(Bytecodes::_fast_agetfield, T3, T2);
  2821     patch_bytecode(Bytecodes::_fast_agetfield, T3, T2);
  2823   __ b(Done);
  2824   __ delayed()->daddi(SP, SP, - wordSize);
  2826   __ bind(notObj);
  2827   __ move(AT, ctos);
  2828   __ bne(flags, AT, notChar);
  2829   __ delayed()->nop();
  2831   // ctos
  2832   __ lhu(FSR, index, 0);
  2833   __ sd(FSR, SP, - wordSize);
  2835   if (!is_static) {
  2836     patch_bytecode(Bytecodes::_fast_cgetfield, T3, T2);
  2838   __ b(Done);
  2839   __ delayed()->daddi(SP, SP, - wordSize);
  2841   __ bind(notChar);
  2842   __ move(AT, stos);
  2843   __ bne(flags, AT, notShort);
  2844   __ delayed()->nop();
  2846   // stos
  2847   __ lh(FSR, index, 0);
  2848   __ sd(FSR, SP, - wordSize);
  2850   if (!is_static) {
  2851     // patch_bytecode(Bytecodes::_fast_sgetfield, T3, T2);
  2852     patch_bytecode(Bytecodes::_fast_sgetfield, T3, T2);
  2854   __ b(Done);
  2855   __ delayed()->daddi(SP, SP, - wordSize);
  2857   __ bind(notShort);
  2858   __ move(AT, ltos);
  2859   __ bne(flags, AT, notLong);
  2860   __ delayed()->nop();
  2862   // FIXME : the load/store should be atomic; we have no simple way to do this on mips32
  2863   // ltos
  2864   __ ld(FSR, index, 0 * wordSize);
  2865   __ sd(FSR, SP, -2 * wordSize);
  2866   __ sd(R0, SP, -1 * wordSize);
  2868   // Don't rewrite to _fast_lgetfield for potential volatile case.
  2869   __ b(Done);
  2870   __ delayed()->daddi(SP, SP, - 2 * wordSize);
  2872   __ bind(notLong);
  2873   __ move(AT, ftos);
  2874   __ bne(flags, AT, notFloat);
  2875   __ delayed()->nop();
  2877   // ftos
  2878   __ lwc1(FSF, index, 0);
  2879   __ sdc1(FSF, SP, - wordSize);
  2881   if (!is_static) {
  2882     patch_bytecode(Bytecodes::_fast_fgetfield, T3, T2);
  2884   __ b(Done);
  2885   __ delayed()->daddi(SP, SP, - wordSize);
  2887   __ bind(notFloat);
  2888   __ move(AT, dtos);
  2889   __ bne(flags, AT, notDouble);
  2890   __ delayed()->nop();
  2892   // dtos
  2893   __ ldc1(FSF, index, 0 * wordSize);
  2894   __ sdc1(FSF, SP, - 2 * wordSize);
  2895   __ sd(R0, SP, - 1 * wordSize);
  2897   if (!is_static) {
  2898     patch_bytecode(Bytecodes::_fast_dgetfield, T3, T2);
  2900   __ b(Done);
  2901   __ delayed()->daddi(SP, SP, - 2 * wordSize);
  2903   __ bind(notDouble);
  2905   __ stop("Bad state");
  2907   __ bind(Done);
  2910 void TemplateTable::getfield(int byte_no) {
  2911   getfield_or_static(byte_no, false);
  2914 void TemplateTable::getstatic(int byte_no) {
  2915   getfield_or_static(byte_no, true);
  2917 /*
  2918 // used registers : T1, T2, T3, T1
  2919 // T1 : cache & cp entry
  2920 // T2 : obj
  2921 // T3 : flags & value pointer
  2922 // T1 : index
  2923 // see ConstantPoolCacheEntry::set_field for more info
  2924 void TemplateTable::jvmti_post_field_mod(int byte_no, bool is_static) {
  2925  */
  2927 // The registers cache and index expected to be set before call.
  2928 // The function may destroy various registers, just not the cache and index registers.
  2929 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  2930 	ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2932 	if (JvmtiExport::can_post_field_modification()) {
  2933 		// Check to see if a field modification watch has been set before we take
  2934 		// the time to call into the VM.
  2935 		Label L1;
  2936 		assert_different_registers(cache, index, AT);
  2938 		//__ lui(AT, Assembler::split_high((int)JvmtiExport::get_field_modification_count_addr()));
  2939 		//__ lw(FSR, AT, Assembler::split_low((int)JvmtiExport::get_field_modification_count_addr()));
  2940 		__ li(AT, JvmtiExport::get_field_modification_count_addr());
  2941 		__ lw(FSR, AT, 0);
  2942 		__ beq(FSR, R0, L1);
  2943 		__ delayed()->nop();
  2945 		/* // We rely on the bytecode being resolved and the cpCache entry filled in.
  2946 		   resolve_cache_and_index(byte_no, T1, T1);
  2947 		   */
  2948 		// The cache and index registers have been already set.
  2949 		// This allows to eliminate this call but the cache and index
  2950 		// registers have to be correspondingly used after this line.
  2951 		// __ get_cache_and_index_at_bcp(eax, edx, 1);
  2952 		__ get_cache_and_index_at_bcp(T1, T9, 1);
  2954 		if (is_static) {
  2955 			__ move(T2, R0);
  2956 		} else {
  2957 			// Life is harder. The stack holds the value on top, 
  2958 			// followed by the object.
  2959 			// We don't know the size of the value, though; 
  2960 			// it could be one or two words
  2961 			// depending on its type. As a result, we must find 
  2962 			// the type to determine where the object is.
  2963 			Label two_word, valsize_known;
  2964 			__ dsll(AT, T1, 4); 
  2965 			__ dadd(AT, T1, AT);
  2966 			__ lw(T3, AT, in_bytes(cp_base_offset 
  2967 						+ ConstantPoolCacheEntry::flags_offset()));
  2968 			__ move(T2, SP);
  2969 			__ shr(T3, ConstantPoolCacheEntry::tos_state_shift);
  2971 			// Make sure we don't need to mask T3 for tos_state_shift 
  2972 			// after the above shift
  2973 			ConstantPoolCacheEntry::verify_tos_state_shift();
  2974 			__ move(AT, ltos);
  2975 			__ beq(T3, AT, two_word);
  2976 			__ delayed()->nop();
  2977 			__ move(AT, dtos);
  2978 			__ beq(T3, AT, two_word);
  2979 			__ delayed()->nop();
  2980 			__ b(valsize_known);
  2981 			//__ delayed()->daddi(T2, T2, wordSize*1);
  2982 			__ delayed()->daddi(T2, T2,Interpreter::expr_offset_in_bytes(1) );
  2984 			__ bind(two_word);
  2985 			//	__ daddi(T2, T2, wordSize*2);
  2986 			__ daddi(T2, T2,Interpreter::expr_offset_in_bytes(2));
  2988 			__ bind(valsize_known);
  2989 			// setup object pointer
  2990 			__ lw(T2, T2, 0*wordSize);
  2992 		// cache entry pointer
  2993 		__ daddi(T1, T1, in_bytes(cp_base_offset));
  2994 		__ shl(T1, 4); 
  2995 		__ daddu(T1, T1, T1);
  2996 		// object (tos)
  2997 		__ move(T3, SP);
  2998 		// T2: object pointer set up above (NULL if static)
  2999 		// T1: cache entry pointer
  3000 		// T3: jvalue object on the stack
  3001 		__ call_VM(NOREG, CAST_FROM_FN_PTR(address, 
  3002 				InterpreterRuntime::post_field_modification), T2, T1, T3);
  3003 		__ get_cache_and_index_at_bcp(cache, index, 1);
  3004 		__ bind(L1);
  3008 // used registers : T0, T1, T2, T3, T8
  3009 // T1 : flags
  3010 // T2 : off
  3011 // T3 : obj
  3012 // T8 : volatile bit
  3013 // see ConstantPoolCacheEntry::set_field for more info
  3014 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  3015   transition(vtos, vtos);
  3017   const Register cache = T3;
  3018   const Register index = T0;
  3019   const Register obj   = T3;
  3020   const Register off   = T2;
  3021   const Register flags = T1;
  3022   const Register bc    = T3;
  3024   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  3025   //TODO: LEE
  3026   //jvmti_post_field_mod(cache, index, is_static);
  3027   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
  3028   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
  3029   // volatile_barrier( );
  3031   Label notVolatile, Done;
  3032   __ move(AT, 1<<ConstantPoolCacheEntry::is_volatile_shift);
  3033   __ andr(T8, flags, AT);
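         // T8 is now non-zero iff the field is volatile; it is tested after the Done
         // label to decide whether a memory barrier must follow the store.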
  3035   Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
  3037   assert(btos == 0, "change code, btos != 0");
  3038   // btos
  3039   __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
  3040   __ andi(flags, flags, ConstantPoolCacheEntry::tos_state_mask);
  3041   __ bne(flags, R0, notByte);
  3042   __ delayed()->nop();
  3044   __ pop(btos);
  3045   if (!is_static) {
  3046     pop_and_check_object(obj); 
  3048   __ dadd(AT, obj, off);
  3049   __ sb(FSR, AT, 0);
  3051   if (!is_static) {
  3052     patch_bytecode(Bytecodes::_fast_bputfield, bc, off, true, byte_no);
  3054   __ b(Done);
  3055   __ delayed()->nop();
  3057   __ bind(notByte);
  3058   // itos
  3059   __ move(AT, itos);
  3060   __ bne(flags, AT, notInt);
  3061   __ delayed()->nop();
  3063   __ pop(itos);
  3064   if (!is_static) {
  3065     pop_and_check_object(obj); 
  3067   __ dadd(AT, obj, off);
  3068   __ sw(FSR, AT, 0);
  3070   if (!is_static) {
  3071     patch_bytecode(Bytecodes::_fast_iputfield, bc, off, true, byte_no);
  3073   __ b(Done);
  3074   __ delayed()->nop();  
  3075   __ bind(notInt);
  3076   // atos
  3077   __ move(AT, atos);
  3078   __ bne(flags, AT, notObj);
  3079   __ delayed()->nop();
  3081   __ pop(atos);
  3082   if (!is_static) {
  3083     pop_and_check_object(obj); 
  3086   __ dadd(AT, obj, off);
  3087   //__ sd(FSR, AT, 0);
  3088   __ store_heap_oop(Address(AT, 0), FSR);
  3089   __ store_check(obj);
  3091   if (!is_static) {
  3092     patch_bytecode(Bytecodes::_fast_aputfield, bc, off, true, byte_no);
  3094   __ b(Done);
  3095   __ delayed()->nop();
  3096   __ bind(notObj);
  3097   // ctos
  3098   __ move(AT, ctos);
  3099   __ bne(flags, AT, notChar);
  3100   __ delayed()->nop();
  3102   __ pop(ctos);
  3103   if (!is_static) {
  3104     pop_and_check_object(obj); 
  3106   __ dadd(AT, obj, off);
  3107   __ sh(FSR, AT, 0);
  3108   if (!is_static) {
  3109     patch_bytecode(Bytecodes::_fast_cputfield, bc, off, true, byte_no);
  3111   __ b(Done);
  3112   __ delayed()->nop();
  3113   __ bind(notChar);
  3114   // stos
  3115   __ move(AT, stos);
  3116   __ bne(flags, AT, notShort);
  3117   __ delayed()->nop();
  3119   __ pop(stos);
  3120   if (!is_static) {
  3121     pop_and_check_object(obj); 
  3123   __ dadd(AT, obj, off);
  3124   __ sh(FSR, AT, 0);
  3125   if (!is_static) {
  3126     patch_bytecode(Bytecodes::_fast_sputfield, bc, off, true, byte_no);
  3128   __ b(Done);
  3129   __ delayed()->nop();
  3130   __ bind(notShort);
  3131   // ltos
  3132   __ move(AT, ltos);
  3133   __ bne(flags, AT, notLong);
  3134   __ delayed()->nop();
  3136   // FIXME: there is no simple way to load/store 64-bit data in an atomic operation,
  3137   // so we just ignore the volatile flag.
  3138   //Label notVolatileLong;
  3139   //__ beq(T1, R0, notVolatileLong);
  3140   //__ delayed()->nop();
  3142   //addent = 2 * wordSize;
  3143   // no need
  3144   //__ lw(FSR, SP, 0);
  3145   //__ lw(SSR, SP, 1 * wordSize);
  3146   //if (!is_static) {
  3147   //	__ lw(T3, SP, addent);
  3148   //	addent += 1 * wordSize;
  3149   //	__ verify_oop(T3);
  3150   //}
  3152   //__ daddu(AT, T3, T2);
  3154   // Replace with real volatile test
  3155   // NOTE : we assume that sdc1&ldc1 operate in 32-bit, this is true for Godson2 even in 64-bit kernel
  3156   // last modified by yjl 7/12/2005
  3157   //__ ldc1(FSF, SP, 0); 
  3158   //__ sdc1(FSF, AT, 0);
  3159   //volatile_barrier();
  3161   // Don't rewrite volatile version
  3162   //__ b(notVolatile);
  3163   //__ delayed()->addiu(SP, SP, addent);
  3165   //__ bind(notVolatileLong);
  3167   //__ pop(ltos);  // overwrites edx
  3168   //	__ lw(FSR, SP, 0 * wordSize);
  3169   //	__ lw(SSR, SP, 1 * wordSize);
  3170   //	__ daddi(SP, SP, 2*wordSize);
  3171   __ pop(ltos);
  3172   if (!is_static) {
  3173     pop_and_check_object(obj); 
  3175   __ dadd(AT, obj, off);
  3176   __ sd(FSR, AT, 0);
  3177   if (!is_static) {
  3178     patch_bytecode(Bytecodes::_fast_lputfield, bc, off, true, byte_no);
  3180   __ b(notVolatile);
  3181   __ delayed()->nop();
  3183   __ bind(notLong);
  3184   // ftos
  3185   __ move(AT, ftos);
  3186   __ bne(flags, AT, notFloat);
  3187   __ delayed()->nop();
  3189   __ pop(ftos);
  3190   if (!is_static) {
  3191     pop_and_check_object(obj); 
  3193   __ dadd(AT, obj, off);
  3194   __ swc1(FSF, AT, 0);
  3195   if (!is_static) {
  3196     patch_bytecode(Bytecodes::_fast_fputfield, bc, off, true, byte_no);
  3198   __ b(Done);
  3199   __ delayed()->nop();
  3200   __ bind(notFloat);
  3201   // dtos
  3202   __ move(AT, dtos);
  3203   __ bne(flags, AT, notDouble);
  3204   __ delayed()->nop();
  3206   __ pop(dtos);
  3207   if (!is_static) {
  3208     pop_and_check_object(obj); 
  3210   __ dadd(AT, obj, off);
  3211   __ sdc1(FSF, AT, 0);
  3212   if (!is_static) {
  3213     patch_bytecode(Bytecodes::_fast_dputfield, bc, off, true, byte_no);
  3215   __ b(Done);
  3216   __ delayed()->nop();
  3217   __ bind(notDouble);
  3219   __ stop("Bad state");
  3221   __ bind(Done);
  3223   // Check for volatile store
  3224   __ beq(T8, R0, notVolatile);
  3225   __ delayed()->nop();
  3226   volatile_barrier( );
  3227   __ bind(notVolatile);
  3230 void TemplateTable::putfield(int byte_no) {
  3231   putfield_or_static(byte_no, false);
  3234 void TemplateTable::putstatic(int byte_no) {
  3235   putfield_or_static(byte_no, true);
  3238 // used registers : T1, T2, T3
  3239 // T1 : cp_entry
  3240 // T2 : obj
  3241 // T3 : value pointer
  3242 void TemplateTable::jvmti_post_fast_field_mod() {
  3243 	if (JvmtiExport::can_post_field_modification()) {
  3244 		// Check to see if a field modification watch has been set before we take
  3245 		// the time to call into the VM.
  3246 		Label L2;
  3247 		//__ lui(AT, Assembler::split_high((intptr_t)JvmtiExport::get_field_modification_count_addr()));
  3248 		//__ lw(T3, AT, Assembler::split_low((intptr_t)JvmtiExport::get_field_modification_count_addr()));
  3249 		__ li(AT, JvmtiExport::get_field_modification_count_addr());
  3250 		__ lw(T3, AT, 0);
  3251 		__ beq(T3, R0, L2);
  3252 		__ delayed()->nop();
  3253 		//__ pop(T2);
  3254 		__ pop_ptr(T2);
  3255 		//__ lw(T2, SP, 0);
  3256 		__ verify_oop(T2);
  3257 		__ push_ptr(T2);	
  3258 		__ li(AT, -sizeof(jvalue));
  3259 		__ daddu(SP, SP, AT);
  3260 		__ move(T3, SP);
  3261 		//__ push(T2);
  3262 		//__ move(T2, R0);
  3264 		switch (bytecode()) {          // load values into the jvalue object
  3265 			case Bytecodes::_fast_bputfield: 
  3266 				__ sb(FSR, SP, 0); 
  3267 				break;
  3268 			case Bytecodes::_fast_sputfield: 
  3269 				__ sh(FSR, SP, 0);
  3270 				break;
  3271 			case Bytecodes::_fast_cputfield: 
  3272 				__ sh(FSR, SP, 0);
  3273 				break;
  3274 			case Bytecodes::_fast_iputfield: 
  3275 				__ sw(FSR, SP, 0);
  3276 				break;							 
  3277 			case Bytecodes::_fast_lputfield: 
  3278 				__ sd(FSR, SP, 0);
  3279 				break;
  3280 			case Bytecodes::_fast_fputfield: 
  3281 				__ swc1(FSF, SP, 0);
  3282 				break;
  3283 			case Bytecodes::_fast_dputfield: 
  3284 				__ sdc1(FSF, SP, 0);
  3285 				break;
  3286 			case Bytecodes::_fast_aputfield: 
  3287 				__ sd(FSR, SP, 0);
  3288 				break;
  3289 			default:  ShouldNotReachHere();
  3292 		//__ pop(T2);  // restore copy of object pointer
  3294 		// Save FSR and sometimes SSR because call_VM() will clobber them,
  3295 		// then use them for JVM/DI purposes
  3296 		__ push(FSR);
  3297 		if (bytecode() == Bytecodes::_fast_lputfield) __ push(SSR);
  3298 		// access constant pool cache entry
  3299 		__ get_cache_entry_pointer_at_bcp(T1, T2, 1);
  3300 		// no need, verified ahead
  3301 		__ verify_oop(T2);
  3303 		// T2: object pointer copied above
  3304 		// T1: cache entry pointer
  3305 		// T3: jvalue object on the stack
  3306 		__ call_VM(NOREG, CAST_FROM_FN_PTR(address, 
  3307 					InterpreterRuntime::post_field_modification), T2, T1, T3);
  3308 		if (bytecode() == Bytecodes::_fast_lputfield) __ pop(SSR);  // restore high value
  3309 		//__ pop(FSR);     // restore lower value   
  3310 		//__ daddi(SP, SP, sizeof(jvalue));  // release jvalue object space
  3311 		__ lw(FSR, SP, 0);
  3312 		__ daddiu(SP, SP, sizeof(jvalue) + 1 * wordSize);
  3313 		__ bind(L2);
  3317 // used registers : T2, T3, T1
  3318 // T2 : index & off & field address
  3319 // T3 : cache & obj
  3320 // T1 : flags
  3321 void TemplateTable::fast_storefield(TosState state) {
  3322   transition(state, vtos);
  3324   ByteSize base = ConstantPoolCache::base_offset();
  3326   jvmti_post_fast_field_mod();
  3328   // access constant pool cache
  3329   __ get_cache_and_index_at_bcp(T3, T2, 1);
  3331   // load the flags into T1 to test for a volatile store
  3332   __ dsll(AT, T2, Address::times_8); 
  3333   __ dadd(AT, T3, AT);
  3334   __ ld(T1, AT, in_bytes(base + ConstantPoolCacheEntry::flags_offset()));
  3336   // replace index with field offset from cache entry
  3337   __ ld(T2, AT, in_bytes(base + ConstantPoolCacheEntry::f2_offset()));
  3339   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
  3340   // volatile_barrier( );
  3342   Label notVolatile, Done;
  3343   // Check for volatile store
  3344   __ move(AT, 1<<ConstantPoolCacheEntry::is_volatile_shift);
  3345   __ andr(AT, T1, AT);
  3346   __ beq(AT, R0, notVolatile);
  3347   __ delayed()->nop();
  3350   // Get object from stack
  3351   // NOTE : the value in FSR/FSF now
  3352   //	__ pop(T3);
  3353   //	__ verify_oop(T3);
  3354   pop_and_check_object(T3);
  3355   // field addresses
  3356   __ dadd(T2, T3, T2);
  3358   // access field
  3359   switch (bytecode()) {
  3360     case Bytecodes::_fast_bputfield: 
  3361       __ sb(FSR, T2, 0);
  3362       break;
  3363     case Bytecodes::_fast_sputfield: // fall through
  3364     case Bytecodes::_fast_cputfield: 
  3365       __ sh(FSR, T2, 0);
  3366       break;
  3367     case Bytecodes::_fast_iputfield: 
  3368       __ sw(FSR, T2, 0);
  3369       break;
  3370     case Bytecodes::_fast_lputfield: 
  3371       __ sd(FSR, T2, 0 * wordSize);
  3372       break;
  3373     case Bytecodes::_fast_fputfield: 
  3374       __ swc1(FSF, T2, 0);
  3375       break;
  3376     case Bytecodes::_fast_dputfield: 
  3377       __ sdc1(FSF, T2, 0 * wordSize);
  3378       break;
  3379     case Bytecodes::_fast_aputfield: 
  3380       __ store_heap_oop(Address(T2, 0), FSR);
  3381       __ store_check(T3);
  3382       break;
  3383     default:
  3384       ShouldNotReachHere();
  3387   Label done;
  3388   volatile_barrier( );
  3389   __ b(done);
  3390   __ delayed()->nop();
  3392   // Same code as above, but no volatile test or barrier is needed.
  3393   __ bind(notVolatile);
  3395   // Get object from stack
  3396   //	__ pop(T3);
  3397   //	__ verify_oop(T3);
  3398   pop_and_check_object(T3);
  3399   //get the field address
  3400   __ dadd(T2, T3, T2);
  3402   // access field
  3403   switch (bytecode()) {
  3404     case Bytecodes::_fast_bputfield: 
  3405       __ sb(FSR, T2, 0); 
  3406       break;
  3407     case Bytecodes::_fast_sputfield: // fall through
  3408     case Bytecodes::_fast_cputfield: 
  3409       __ sh(FSR, T2, 0);
  3410       break;
  3411     case Bytecodes::_fast_iputfield: 
  3412       __ sw(FSR, T2, 0);
  3413       break;
  3414     case Bytecodes::_fast_lputfield: 
  3415       __ sd(FSR, T2, 0 * wordSize);
  3416       break;
  3417     case Bytecodes::_fast_fputfield: 
  3418       __ swc1(FSF, T2, 0);
  3419       break;
  3420     case Bytecodes::_fast_dputfield: 
  3421       __ sdc1(FSF, T2, 0 * wordSize);
  3422       break;
  3423     case Bytecodes::_fast_aputfield: 
  3424       //add for compressedoops
  3425       __ store_heap_oop(Address(T2, 0), FSR);
  3426       __ store_check(T3);
  3427       break;
  3428     default:
  3429       ShouldNotReachHere();
  3431   __ bind(done);
  3434 // used registers : T2, T3, T1
  3435 // T3 : cp_entry & cache
  3436 // T2 : index & offset
  3437 void TemplateTable::fast_accessfield(TosState state) {
  3438   transition(atos, state);
  3440   // do the JVMTI work here to avoid disturbing the register state below
  3441   if (JvmtiExport::can_post_field_access()) {
  3442     // Check to see if a field access watch has been set before we take
  3443     // the time to call into the VM.
  3444     Label L1;
  3445     __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr());
  3446     __ lw(T3, AT, 0);
  3447     __ beq(T3, R0, L1);
  3448     __ delayed()->nop();
  3449     // access constant pool cache entry
  3450     __ get_cache_entry_pointer_at_bcp(T3, T1, 1);
  3451     __ move(TSR, FSR);
  3452     __ verify_oop(FSR);
  3453     // FSR: object pointer copied above
  3454     // T3: cache entry pointer
  3455     __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
  3456 	FSR, T3);
  3457     __ move(FSR, TSR);
  3458     __ bind(L1);
  3461   // access constant pool cache
  3462   __ get_cache_and_index_at_bcp(T3, T2, 1);
  3463   // replace index with field offset from cache entry
  3464   __ dsll(AT, T2, Address::times_8);
  3465   //__ dsll(AT, T2, 4);
  3466   __ dadd(AT, T3, AT);
  3467   __ ld(T2, AT, in_bytes(ConstantPoolCache::base_offset() 
  3468 	+ ConstantPoolCacheEntry::f2_offset()));
  3470   // FSR: object
  3471   __ verify_oop(FSR);
  3472   // __ null_check(FSR, 0);
  3473   __ null_check(FSR);
  3474   // field addresses
  3475   __ dadd(FSR, FSR, T2);
  3477   // access field
  3478   switch (bytecode()) {
  3479     case Bytecodes::_fast_bgetfield: 
  3480       __ lb(FSR, FSR, 0);
  3481       break;
  3482     case Bytecodes::_fast_sgetfield: 
  3483       __ lh(FSR, FSR, 0);
  3484       break;
  3485     case Bytecodes::_fast_cgetfield: 
  3486       __ lhu(FSR, FSR, 0);
  3487       break;
  3488     case Bytecodes::_fast_igetfield:
  3489       __ lw(FSR, FSR, 0);
  3490       break;
  3491     case Bytecodes::_fast_lgetfield: 
  3492       __ stop("should not be rewritten");  
  3493       break;
  3494     case Bytecodes::_fast_fgetfield: 
  3495       __ lwc1(FSF, FSR, 0);
  3496       break;
  3497     case Bytecodes::_fast_dgetfield: 
  3498       __ ldc1(FSF, FSR, 0);
  3499       break;
  3500     case Bytecodes::_fast_agetfield:
  3501       //add for compressedoops
  3502       __ load_heap_oop(FSR, Address(FSR, 0));
  3503       __ verify_oop(FSR);
  3504       break;
  3505     default:
  3506       ShouldNotReachHere();
  3509   // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
  3510   // volatile_barrier( );
  3513 // generator for _fast_iaccess_0, _fast_aaccess_0, _fast_faccess_0
  3514 // used registers : T1, T2, T3
  3515 // T1 : obj & field address
  3516 // T2 : index & offset
  3517 // T3 : cache
  3519 void TemplateTable::fast_xaccess(TosState state) {
  3520   transition(vtos, state);
  3521   // get receiver
  3522   __ ld(T1, aaddress(0));
  3523   // access constant pool cache
  3524   __ get_cache_and_index_at_bcp(T3, T2, 2);
  3525   __ dsll(AT, T2, Address::times_8);
  3526   __ dadd(AT, T3, AT);
  3527   __ ld(T2, AT, in_bytes(ConstantPoolCache::base_offset() 
  3528 	+ ConstantPoolCacheEntry::f2_offset()));
  3530   // make sure exception is reported in correct bcp range (getfield is next instruction)
  3531   __ daddi(BCP, BCP, 1);
  3532   //	__ null_check(T1, 0);
  3533   __ null_check(T1);
  3534   __ dadd(T1, T1, T2);
  3536   if (state == itos) {
  3537     __ lw(FSR, T1, 0);
  3538   } else if (state == atos) {
  3539     //__ ld(FSR, T1, 0);
  3540     __ load_heap_oop(FSR, Address(T1, 0));
  3541     __ verify_oop(FSR);
  3542   } else if (state == ftos) {
  3543     __ lwc1(FSF, T1, 0);
  3544   } else {
  3545     ShouldNotReachHere();
  3547   __ daddi(BCP, BCP, -1);
  3550 //---------------------------------------------------
  3551 //-------------------------------------------------
  3552 // Calls
  3554 void TemplateTable::count_calls(Register method, Register temp) {
  3555   // implemented elsewhere
  3556   ShouldNotReachHere();
  3559 // method, index, recv, flags: T1, T2, T3, T1
  3560 // byte_no = 2 for _invokevirtual, 1 otherwise
  3561 // T0 : return address
  3562 // Gets the method & index of the invoke and loads the return address of
  3563 // the invoke (the first word in the frame); this is the address the
  3564 // return code jumps to.
  3565 // NOTE: this method sets T3 and T1 to recv and flags
  3566 void TemplateTable::prepare_invoke(int byte_no,
  3567                                    Register method, //linked method (or i-klass)
  3568                                    Register index, //itable index, MethodType ,etc.
  3569                                    Register recv, // if caller wants to see it
  3570                                    Register flags // if caller wants to test it
  3571 		                   ) {
  3572   // determine flags
  3573   const Bytecodes::Code code = bytecode();
  3574   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
  3575   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
  3576   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
  3577   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
  3578   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
  3579   const bool load_receiver       = (recv  != noreg);
  3580   const bool save_flags          = (flags != noreg);
  3581   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic),"");
  3582   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  3583   assert(flags == noreg || flags == T1, "error flags reg.");
  3584   assert(recv  == noreg || recv  == T3, "error recv reg.");
  3585   // setup registers & access constant pool cache
  3586   if(recv == noreg) recv  = T3;
  3587   if(flags == noreg) flags  = T1;
  3589   assert_different_registers(method, index, recv, flags);
  3591   // save 'interpreter return address'
  3592   __ save_bcp();
  3594   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
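         // load_invoke_cp_cache_entry fills 'method' (from f1 or f2, depending on the
         // invoke kind), 'index' (from f2) and 'flags' from the resolved
         // ConstantPoolCacheEntry; the TOS state and parameter size are encoded in 'flags'.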
  3595   if (is_invokedynamic || is_invokehandle) {
  3596    Label L_no_push;
  3597      __ move(AT, (1 << ConstantPoolCacheEntry::has_appendix_shift));
  3598      __ andr(AT, AT, flags);
  3599      __ beq(AT, R0, L_no_push);
  3600      __ delayed()->nop();
  3601      // Push the appendix as a trailing parameter.
  3602      // This must be done before we get the receiver,
  3603      // since the parameter_size includes it.
  3604      Register tmp = SSR;
  3605      __ push(tmp);
  3606      __ move(tmp, index);
  3607      assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
  3608      __ load_resolved_reference_at_index(index, tmp);
  3609      __ pop(tmp);
  3610      __ push(index);  // push appendix (MethodType, CallSite, etc.)
  3611      __ bind(L_no_push);
  3615 // load receiver if needed (after appendix is pushed so parameter size is correct)
  3616 // Note: no return address pushed yet
  3617   if (load_receiver) {
  3618 	 __ move(AT, ConstantPoolCacheEntry::parameter_size_mask);
  3619 	 __ andr(recv, flags, AT);
  3620          // 2014/07/31 Fu: Since we won't push RA on stack, no_return_pc_pushed_yet should be 0.
  3621 	 const int no_return_pc_pushed_yet = 0;  // argument slot correction before we push return address
  3622 	 const int receiver_is_at_end      = -1;  // back off one slot to get receiver
  3623 	 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
  3625 	 __ ld(recv, recv_addr);
  3626 	 __ verify_oop(recv);	
  3628   if(save_flags) {
  3629     //__ movl(r13, flags);
  3630     __ move(BCP, flags);
  3632   // compute return type
  3633   __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
  3634   __ andi(flags, flags, 0xf);
  3636   // Make sure we don't need to mask flags for tos_state_shift after the above shift
  3637   ConstantPoolCacheEntry::verify_tos_state_shift();
  3638   // load return address
  3640     const address table = (address) Interpreter::invoke_return_entry_table_for(code);
  3641     __ li(AT, (long)table);
  3642     __ dsll(flags, flags, LogBytesPerWord);
  3643     __ dadd(AT, AT, flags);
  3644     __ ld(RA, AT, 0);
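           // RA now holds the interpreter return entry matching the callee's result
           // type (TOS state), so the invoked method returns to the right continuation.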
  3647   if (save_flags) {
  3648     __ move(flags, BCP);
  3649     __ restore_bcp();
  3653 // used registers : T0, T3, T1, T2
  3654 // T3 : recv; this register convention is established by prepare_invoke
  3655 // T1 : flags, klass
  3656 // Rmethod : method, index must be Rmethod
  3657 void TemplateTable::invokevirtual_helper(Register index, Register recv,
  3658 		Register flags) {
  3660   assert_different_registers(index, recv, flags, T2);
  3662   // Test for an invoke of a final method
  3663   Label notFinal;
  3664   __ move(AT, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  3665   __ andr(AT, flags, AT);
  3666   __ beq(AT, R0, notFinal);
  3667   __ delayed()->nop();
  3669   Register method = index;  // method must be Rmethod
  3670   assert(method == Rmethod, "methodOop must be Rmethod for interpreter calling convention");
  3672   // do the call - the index is actually the method to call
  3673   // the index actually holds the methodOop, since this is a vfinal call;
  3674   // see ConstantPoolCacheEntry::set_method for more info
  3676   __ verify_oop(method);
  3678   // It's final, need a null check here!
  3679   __ null_check(recv);
  3681   // profile this call
  3682   __ profile_final_call(T2);
  3684   // 2014/11/24 Fu 
  3685   // T2: tmp, used for mdp
  3686   // method: callee
  3687   // T9: tmp
  3688   // is_virtual: true 
  3689   __ profile_arguments_type(T2, method, T9, true);
  3691 //  __ move(T0, recv);
  3692   __ jump_from_interpreted(method, T2);
  3694   __ bind(notFinal);
  3696   // get receiver klass
  3697   __ null_check(recv, oopDesc::klass_offset_in_bytes());
  3698   // Keep recv in T3; the callee expects it there
  3699   __ load_klass(T2, recv);
  3700   __ verify_oop(T2);
  3701   // profile this call
  3702   __ profile_virtual_call(T2, T0, T1);
  3704   // get target methodOop & entry point
  3705   const int base = InstanceKlass::vtable_start_offset() * wordSize;    
  3706   assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below");
  3707   __ dsll(AT, index, Address::times_8);
  3708   __ dadd(AT, T2, AT);
  3709   // this may be an unaligned read
  3710   __ ld(method, AT, base + vtableEntry::method_offset_in_bytes());
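         // method = *(recv_klass + vtable_start_offset + index * 8 + method_offset),
         // i.e. the Method* selected by the receiver's dynamic type.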
  3711   __ jump_from_interpreted(method, T2);
  3715 void TemplateTable::invokevirtual(int byte_no) {
  3716   transition(vtos, vtos);
  3717   assert(byte_no == f2_byte, "use this argument");
  3718   prepare_invoke(byte_no, Rmethod, NOREG, T3, T1);
  3719   // now recv & flags in T3, T1
  3720   invokevirtual_helper(Rmethod, T3, T1);
  3723 // T9 : entry
  3724 // Rmethod : method
  3725 void TemplateTable::invokespecial(int byte_no) {
  3726   transition(vtos, vtos);
  3727   assert(byte_no == f1_byte, "use this argument");
  3728   prepare_invoke(byte_no, Rmethod, NOREG, T3);
  3729   // now recv & flags in T3, T1
  3730   __ verify_oop(T3);
  3731   __ null_check(T3);
  3732   __ profile_call(T9);
  3734   // 2014/11/24 Fu 
  3735   // T8: tmp, used for mdp
  3736   // Rmethod: callee
  3737   // T9: tmp
  3738   // is_virtual: false 
  3739   __ profile_arguments_type(T8, Rmethod, T9, false);
  3741   __ jump_from_interpreted(Rmethod, T9);
  3742   __ move(T0, T3);//aoqi ?
  3745 void TemplateTable::invokestatic(int byte_no) {
  3746   transition(vtos, vtos);
  3747   assert(byte_no == f1_byte, "use this argument");
  3748   prepare_invoke(byte_no, Rmethod, NOREG);
  3749   __ verify_oop(Rmethod);
  3751   __ profile_call(T9);
  3753   // 2014/11/24 Fu 
  3754   // T8: tmp, used for mdp
  3755   // Rmethod: callee
  3756   // T9: tmp
  3757   // is_virtual: false 
  3758   __ profile_arguments_type(T8, Rmethod, T9, false);
  3760   __ jump_from_interpreted(Rmethod, T9);
  3763 // not implemented on this port; left as a FIXME for a future change
  3764 void TemplateTable::fast_invokevfinal(int byte_no) {
  3765   transition(vtos, vtos);
  3766   assert(byte_no == f2_byte, "use this argument");
  3767   __ stop("fast_invokevfinal not used on mips64");
  3770 // used registers : T0, T1, T2, T3, A7
  3771 // T0 : itable, vtable, entry
  3772 // T2 : interface
  3773 // T3 : receiver
  3774 // T1 : flags, klass
  3775 // Rmethod : index, method, this is required by interpreter_entry
  3776 void TemplateTable::invokeinterface(int byte_no) {
  3777   transition(vtos, vtos);
  3778   //this method will use T1-T4 and T0
  3779   assert(byte_no == f1_byte, "use this argument");
  3780   prepare_invoke(byte_no, T2, Rmethod, T3, T1);
  3781   // T2: Interface
  3782   // Rmethod: index
  3783   // T3: receiver    
  3784   // T1: flags
  3785   Label notMethod;
  3786   __ move(AT, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  3787   __ andr(AT, T1, AT);
  3788   __ beq(AT, R0, notMethod);
  3789   __ delayed()->nop();
  3791   // Special case of invokeinterface called for virtual method of
  3792   // java.lang.Object.  See cpCacheOop.cpp for details.
  3793   // This code isn't produced by javac, but could be produced by
  3794   // another compliant java compiler.
  3795   invokevirtual_helper(Rmethod, T3, T1);
  3797   __ bind(notMethod);
  3798   // Get receiver klass into T1 - also a null check
  3799   //__ ld(T1, T3, oopDesc::klass_offset_in_bytes());
  3800   //add for compressedoops
  3801   //__ restore_locals();
  3802   //__ null_check(T3, oopDesc::klass_offset_in_bytes());
  3803   __ load_klass(T1, T3);
  3804   __ verify_oop(T1);
  3806   // profile this call
  3807   __ profile_virtual_call(T1, T0, FSR);
  3809   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  3810   // TODO: x86 add a new method lookup_interface_method  // LEE
  3811   const int base = InstanceKlass::vtable_start_offset() * wordSize;    
  3812   assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below");
  3813   __ lw(AT, T1, InstanceKlass::vtable_length_offset() * wordSize);
  3814   __ dsll(AT, AT, Address::times_8);
  3815   __ dadd(T0, T1, AT);
  3816   __ daddi(T0, T0, base);
  3817   if (HeapWordsPerLong > 1) {
  3818     // Round up to align_object_offset boundary
  3819     __ round_to(T0, BytesPerLong);
  3821   // now T0 points to the beginning of the itable
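         // Each itableOffsetEntry pairs an interface Klass* with the offset of that
         // interface's method table inside the receiver's klass.  Scan the entries
         // until one matches T2 (the target interface) or a null entry is found,
         // which means the receiver does not implement the interface.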
  3823   Label entry, search, interface_ok;
  3825   ///__ jmp(entry);   
  3826   __ b(entry);
  3827   __ delayed()->nop();
  3829   __ bind(search);
  3830   __ increment(T0, itableOffsetEntry::size() * wordSize);
  3832   __ bind(entry);
  3834   // Check that the entry is non-null.  A null entry means that the receiver
  3835   // class doesn't implement the interface, and wasn't the same as the
  3836   // receiver class checked when the interface was resolved.
  3837   __ ld(AT, T0, itableOffsetEntry::interface_offset_in_bytes());
  3838   __ bne(AT, R0, interface_ok);
  3839   __ delayed()->nop();
  3840   // throw exception
  3841   // the call_VM checks for exception, so we should never return here.
  3843   //__ pop(); //FIXME
  3844   // No need to pop the return address here (x86 pops the one pushed by
  3845   // prepare_invoke); on this port the value is kept in RA.
  3847   __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
  3848   __ should_not_reach_here();
  3850   __ bind(interface_ok);
  3851   // NOTE: unlike x86, no pop is needed here
  3852   //__ lw(AT, T0, itableOffsetEntry::interface_offset_in_bytes());
  3853   __ bne(AT, T2, search);
  3854   __ delayed()->nop();
  3856   // now we get vtable of the interface
  3857   __ ld(T0, T0, itableOffsetEntry::offset_offset_in_bytes());
  3858   __ daddu(T0, T1, T0);
  3859   assert(itableMethodEntry::size() * wordSize == 8, "adjust the scaling in the code below");
  3860   __ dsll(AT, Rmethod, Address::times_8);
  3861   __ daddu(AT, T0, AT);
  3862   // now we get the method
  3863   __ ld(Rmethod, AT, 0);
  3864   // Rmethod: methodOop to call
  3865   // T3: receiver
  3866   // Check for abstract method error
  3867   // Note: This should be done more efficiently via a throw_abstract_method_error
  3868   //       interpreter entry point and a conditional jump to it in case of a null
  3869   //       method.
  3871     Label L;
  3872     ///__ testl(ebx, ebx);
  3873     ///__ jcc(Assembler::notZero, L);
  3874     __ bne(Rmethod, R0, L);
  3875     __ delayed()->nop();
  3877     // throw exception
  3878     // note: must restore interpreter registers to canonical
  3879     //       state for exception handling to work correctly!
  3880     ///__ popl(ebx);          // pop return address (pushed by prepare_invoke)
  3881     //__ restore_bcp();      // esi must be correct for exception handler   
  3882     //(was destroyed)
  3883     //__ restore_locals();   // make sure locals pointer 
  3884     //is correct as well (was destroyed)
  3885     ///__ call_VM(noreg, CAST_FROM_FN_PTR(address, 
  3886     //InterpreterRuntime::throw_AbstractMethodError));
  3887     __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  3888     // the call_VM checks for exception, so we should never return here.
  3889     __ should_not_reach_here();
  3890     __ bind(L);
  3893   // 2014/11/24 Fu 
  3894   // T8: tmp, used for mdp
  3895   // Rmethod: callee
  3896   // T9: tmp
  3897   // is_virtual: true
  3898   __ profile_arguments_type(T8, Rmethod, T9, true);
  3900   __ jump_from_interpreted(Rmethod, T9);
  3903 void TemplateTable::invokehandle(int byte_no) {
  3904   transition(vtos, vtos);
  3905   assert(byte_no == f1_byte, "use this argument");
  3906   const Register T2_method = Rmethod;
  3907   const Register FSR_mtype  = FSR;
  3908   const Register T3_recv   = T3;
  3910   if (!EnableInvokeDynamic) {
  3911      // rewriter does not generate this bytecode
  3912      __ should_not_reach_here();
  3913      return;
  3916    prepare_invoke(byte_no, T2_method, FSR_mtype, T3_recv);
  3917    //??__ verify_method_ptr(T2_method);
  3918    __ verify_oop(T3_recv);
  3919    __ null_check(T3_recv);
  3921    // FSR_mtype: MethodType object (from cpool->resolved_references[f1], if necessary)
  3922    // T2_method: MH.invokeExact_MT method (from f2)
  3924    // Note:  the MethodType is already pushed (if necessary) by prepare_invoke
  3926    // FIXME: profile the LambdaForm also
  3927    __ profile_final_call(T9);
  3929    // 2014/11/24 Fu 
  3930    // T8: tmp, used for mdp
  3931    // T2_method: callee
  3932    // T9: tmp
  3933    // is_virtual: true
  3934    __ profile_arguments_type(T8, T2_method, T9, true);
  3936   __ jump_from_interpreted(T2_method, T9);
  3939  void TemplateTable::invokedynamic(int byte_no) {
  3940    transition(vtos, vtos);
  3941    assert(byte_no == f1_byte, "use this argument");
  3943    if (!EnableInvokeDynamic) {
  3944      // We should not encounter this bytecode if !EnableInvokeDynamic.
  3945      // The verifier will stop it.  However, if we get past the verifier,
  3946      // this will stop the thread in a reasonable way, without crashing the JVM.
  3947      __ call_VM(noreg, CAST_FROM_FN_PTR(address,
  3948                       InterpreterRuntime::throw_IncompatibleClassChangeError));
  3949      // the call_VM checks for exception, so we should never return here.
  3950      __ should_not_reach_here();
  3951      return;
  3954    //const Register Rmethod   = T2;
  3955    const Register T2_callsite = T2;
  3957    prepare_invoke(byte_no, Rmethod, T2_callsite);
  3959    // T2_callsite: CallSite object (from cpool->resolved_references[f1])
  3960    // Rmethod: MH.linkToCallSite method (from f2)
  3962    // Note:  the CallSite object is already pushed by prepare_invoke
  3963    // %%% should make a type profile for any invokedynamic that takes a ref argument
  3964    // profile this call
  3965    __ profile_call(T9);
  3967    // 2014/11/24 Fu 
  3968    // T8: tmp, used for mdp
  3969    // Rmethod: callee
  3970    // T9: tmp
  3971    // is_virtual: false 
  3972    __ profile_arguments_type(T8, Rmethod, T9, false);
  3974    __ verify_oop(T2_callsite);
  3976    __ jump_from_interpreted(Rmethod, T9);
  3979 //----------------------------------------------------------------------------------------------------
  3980 // Allocation
  3981 // T1 : tags & buffer end & thread
  3982 // T2 : object end
  3983 // T3 : klass
  3984 // T0 : object size
  3985 // A1 : cpool
  3986 // A2 : cp index
  3987 // return object in FSR
  3988 void TemplateTable::_new() {
  3989   transition(vtos, atos);
  3990   __ load_two_bytes_from_at_bcp(A2, AT, 1);
  3991   __ huswap(A2);
  3993   Label slow_case;
  3994   Label done;
  3995   Label initialize_header;
  3996   Label initialize_object;  // including clearing the fields
  3997   Label allocate_shared;
  3999   // get InstanceKlass in T3
  4000   __ get_cpool_and_tags(A1, T1);
  4001   __ dsll(AT, A2, Address::times_8);
  4002   __ dadd(AT, A1, AT);
  4003   __ ld(T3, AT, sizeof(ConstantPool));
  4005   // make sure the class we're about to instantiate has been resolved. 
  4006   // Note: slow_case does a pop of stack, which is why we loaded class/pushed above
  4007   const int tags_offset = Array<u1>::base_offset_in_bytes();
  4008   __ dadd(T1, T1, A2);
  4009   __ lb(AT, T1, tags_offset);
  4010   //__ addiu(AT, AT, - (int)JVM_CONSTANT_UnresolvedClass);
  4011   __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
  4012   //__ beq(AT, R0, slow_case);
  4013   __ bne(AT, R0, slow_case);
  4014   __ delayed()->nop();
  4016   /* make sure klass is initialized & doesn't have a finalizer */
  4018   // make sure klass is fully initialized
  4019   __ lw(T1, T3, in_bytes(InstanceKlass::init_state_offset()));
  4020   __ daddiu(AT, T1, - (int)InstanceKlass::fully_initialized);
  4021   __ bne(AT, R0, slow_case);
  4022   __ delayed()->nop();
  4024   // has_finalizer
  4025   //__ lw(T1, T3, Klass::access_flags_offset() + sizeof(oopDesc));
  4026   //__ move(AT, JVM_ACC_CAN_BE_FASTPATH_ALLOCATED);
  4027   //__ andr(AT, T1, AT);
  4028   __ lw(T1, T3, in_bytes(Klass::layout_helper_offset()) );
  4029   __ andi(AT, T1, Klass::_lh_instance_slow_path_bit);
  4030   __ bne(AT, R0, slow_case);
  4031   __ delayed()->nop();
  4033   // get instance_size in InstanceKlass (already aligned) in T0, 
  4034   // be sure to preserve this value 
  4035   //__ lw(T0, T3, Klass::size_helper_offset_in_bytes() + sizeof(oopDesc));
  4036   // Klass::_size_helper has been renamed to Klass::_layout_helper. aoqi
  4037   __ lw(T0, T3, in_bytes(Klass::layout_helper_offset()) );
  4039   // 
  4040   // Allocate the instance
  4041   // 1) Try to allocate in the TLAB
  4042   // 2) if fail and the object is large allocate in the shared Eden
  4043   // 3) if the above fails (or is not applicable), go to a slow case
  4044   // (creates a new TLAB, etc.)
  4046   const bool allow_shared_alloc =
  4047     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
  4049   if (UseTLAB) {
  4050 #ifndef OPT_THREAD
  4051     const Register thread = T8;
  4052     __ get_thread(thread);
  4053 #else
  4054     const Register thread = TREG;
  4055 #endif
  4056     // get tlab_top
  4057     __ ld(FSR, thread, in_bytes(JavaThread::tlab_top_offset()));
  4058     __ dadd(T2, FSR, T0);
  4059     // get tlab_end
  4060     __ ld(AT, thread, in_bytes(JavaThread::tlab_end_offset()));
  4061     __ slt(AT, AT, T2);
  4062     //		__ bne(AT, R0, allocate_shared);
  4063     __ bne(AT, R0, allow_shared_alloc ? allocate_shared : slow_case);
  4064     __ delayed()->nop();
  4065     __ sd(T2, thread, in_bytes(JavaThread::tlab_top_offset()));
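           // TLAB bump-pointer allocation: FSR (the old top) becomes the new object
           // and T2 (old top + instance size) the new top; no atomics are needed
           // because the TLAB is private to this thread.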
  4067     if (ZeroTLAB) {
  4068       // the fields have already been cleared
  4069       __ b_far(initialize_header);
  4070     } else {
  4071       // initialize both the header and fields
  4072       __ b_far(initialize_object);
  4074     __ delayed()->nop();
  4075     /*
  4077        if (CMSIncrementalMode) {
  4078     // No allocation in shared eden. 
  4079     ///__ jmp(slow_case);
  4080     __ b(slow_case);
  4081     __ delayed()->nop();
  4083      */ 
  4086   // Allocation in the shared Eden , if allowed
  4087   // T0 : instance size in words
  4088   if(allow_shared_alloc){ 
  4089     __ bind(allocate_shared);
  4090     Label retry;
  4091     //Address heap_top(T1, (int)Universe::heap()->top_addr());
  4092     Address heap_top(T1);
  4093     //__ lui(T1, Assembler::split_high((int)Universe::heap()->top_addr()));
  4094     __ li(T1, (long)Universe::heap()->top_addr());
  4096     __ ld(FSR, heap_top);
  4097     __ bind(retry);
  4098     __ dadd(T2, FSR, T0);
  4099     //__ lui(AT, Assembler::split_high((int)Universe::heap()->end_addr()));
  4100     //__ lw(AT, AT, Assembler::split_low((int)Universe::heap()->end_addr()));
  4101     __ li(AT, (long)Universe::heap()->end_addr());
  4102     __ ld(AT, AT, 0);
  4103     __ slt(AT, AT, T2);
  4104     __ bne(AT, R0, slow_case);
  4105     __ delayed()->nop();
  4107     // Compare FSR with the current heap top and, if they are still equal,
  4108     // store the new top (T2) at the top address.  cmpxchg is expected to leave
  4109     // zero in AT when the exchange fails, so a failed CAS branches back to retry.
  4110     //
  4111     // FSR: object begin
  4112     // T2: object end
  4113     // T0: instance size in words
  4115     // if someone beat us on the allocation, try again, otherwise continue 
  4116     //__ lui(T1, Assembler::split_high((int)Universe::heap()->top_addr()));
  4117     __ cmpxchg(T2, heap_top, FSR);
  4118     __ beq(AT, R0, retry);
  4119     __ delayed()->nop();
  4122   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
  4123     // The object is initialized before the header.  If the object size is
  4124     // zero, go directly to the header initialization.
  4125     __ bind(initialize_object);
  4126     __ li(AT, - sizeof(oopDesc));
  4127     __ daddu(T0, T0, AT);
  4128     __ beq_far(T0, R0, initialize_header);
  4129     __ delayed()->nop();
  4132     // T0 must be a multiple of 2
  4133 #ifdef ASSERT
  4134     // make sure T0 is a multiple of 2
  4135     Label L;
  4136     __ andi(AT, T0, 1);
  4137     __ beq(AT, R0, L);
  4138     __ delayed()->nop();
  4139     __ stop("object size is not multiple of 2 - adjust this code");
  4140     __ bind(L);
  4141     // T0 must be > 0, no extra check needed here
  4142 #endif
  4144     // initialize remaining object fields: T0 is a multiple of 2
  4146       Label loop;
  4147       __ dadd(T1, FSR, T0);
  4148       __ daddi(T1, T1, -oopSize);
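             // Clear the instance fields one word at a time, from the last word of
             // the object down to the first word after the header; the header itself
             // is initialized separately below.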
  4150       __ bind(loop);
  4151       __ sd(R0, T1, sizeof(oopDesc) + 0 * oopSize);
  4152 //      __ sd(R0, T1, sizeof(oopDesc) + 1 * oopSize);
  4153       __ bne(T1, FSR, loop); // don't clear the header
  4154       __ delayed()->daddi(T1, T1, -oopSize);
  4155       // since oopSize == 8, the decrement fits in the delay slot and the loop
  4156       // terminates by comparing T1 against FSR (the object start)
  4158     //klass in T3, 
  4159     // initialize object header only.
  4160     __ bind(initialize_header);
  4161     if (UseBiasedLocking) {
  4162       // __ popl(ecx);   // get saved klass back in the register.
  4163       // __ movl(ebx, Address(ecx, Klass::prototype_header_offset_in_bytes() 
  4164       // + klassOopDesc::klass_part_offset_in_bytes()));
  4165       __ ld(AT, T3, in_bytes(Klass::prototype_header_offset())); 
  4166       // __ movl(Address(eax, oopDesc::mark_offset_in_bytes ()), ebx);
  4167       __ sd(AT, FSR, oopDesc::mark_offset_in_bytes ());    
  4168     } else {
  4169       __ li(AT, (long)markOopDesc::prototype());
  4170       __ sd(AT, FSR, oopDesc::mark_offset_in_bytes());
  4173     //__ sd(T3, FSR, oopDesc::klass_offset_in_bytes());
  4174     __ store_klass_gap(FSR, R0);
  4175     __ store_klass(FSR, T3);
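           // store_klass_gap zeroes the 32-bit gap beside a compressed klass pointer
           // (presumably a no-op when klass pointers are not compressed); store_klass
           // then installs T3 as the new object's klass.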
  4178       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
  4179       // Trigger dtrace event for fastpath
  4180       __ push(atos);
  4181       __ call_VM_leaf(
  4182 	  CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), FSR);
  4183       __ pop(atos);
  4185     __ b(done);
  4186     __ delayed()->nop();
  4188   // slow case
  4189   __ bind(slow_case);
  4190   call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), A1, A2);
  4192   // continue
  4193   __ bind(done);
  4196 void TemplateTable::newarray() {
  4197   transition(itos, atos);
  4198   __ lbu(A1, at_bcp(1));
  4199   // type, count
  4200   call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), A1, FSR);
  4203 void TemplateTable::anewarray() {
  4204   transition(itos, atos);
  4205   __ load_two_bytes_from_at_bcp(A2, AT, 1);
  4206   __ huswap(A2);
  4207   __ get_constant_pool(A1);
  4208   // cp, index, count
  4209   call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), A1, A2, FSR);
  4212 void TemplateTable::arraylength() {
  4213   transition(atos, itos);
  4214   __ null_check(FSR, arrayOopDesc::length_offset_in_bytes());
  4215   __ lw(FSR, FSR, arrayOopDesc::length_offset_in_bytes());
  4218 // T2, T3 and T1 play the roles of ebx, ecx and edx in the x86 version.
  4219 // When gen_subtype_check is invoked, the superklass is in T3, the subklass
  4220 // in T2, and the object (as always) is in FSR.
  4221 // T2 : sub klass
  4222 // T3 : cpool, then super klass
  4223 void TemplateTable::checkcast() {
  4224   transition(atos, atos);
  4225   Label done, is_null, ok_is_subtype, quicked, resolved;
  4226   __ beq(FSR, R0, is_null);
  4227   __ delayed()->nop();
  4229   // Get cpool & tags index
  4230   __ get_cpool_and_tags(T3, T1);
  4231   __ load_two_bytes_from_at_bcp(T2, AT, 1);
  4232   __ huswap(T2);
  4234   // See if bytecode has already been quicked
  4235   __ dadd(AT, T1, T2);
  4236   __ lb(AT, AT, Array<u1>::base_offset_in_bytes());
  4237   __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
  4238   __ beq(AT, R0, quicked);
  4239   __ delayed()->nop();
  4241   /* 2012/6/2 Jin: In InterpreterRuntime::quicken_io_cc, many new classes may be loaded,
  4242    *  and GC may then move the object in V0 to another place in the heap.
  4243    *  Therefore we must never keep such an object in a register across the call;
  4244    *  save it on the stack instead, where the GC thread can update the reference.
  4245    *  After GC, the object address popped back into FSR points to the new location.
  4246    */
  4247   __ push(atos);
  4248   const Register thread = TREG;
  4249 #ifndef OPT_THREAD
  4250   __ get_thread(thread);
  4251 #endif
  4252   call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  4253   __ get_vm_result_2(T3, thread);
  4254   __ pop_ptr(FSR);
  4255   __ b(resolved);
  4256   __ delayed()->nop();
  4258   // klass already in cp, get superklass in T3
  4259   __ bind(quicked);
  4260   __ dsll(AT, T2, Address::times_8);
  4261   __ dadd(AT, T3, AT);
  4262   __ ld(T3, AT, sizeof(ConstantPool));
  4264   __ bind(resolved);
  4266   // get subklass in T2
  4267   //__ ld(T2, FSR, oopDesc::klass_offset_in_bytes());
  4268   //add for compressedoops
  4269   __ load_klass(T2, FSR);
  4270   // Superklass in T3.  Subklass in T2.
  4271   __ gen_subtype_check(T3, T2, ok_is_subtype);
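         // gen_subtype_check branches to ok_is_subtype when T2 is a subtype of T3;
         // otherwise it falls through to the failure path below.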
  4273   // Come here on failure
  4274   // object is at FSR
  4275   __ jmp(Interpreter::_throw_ClassCastException_entry);
  4276   __ delayed()->nop();
  4278   // Come here on success
  4279   __ bind(ok_is_subtype);
  4281   // Collect counts on whether this check-cast sees NULLs a lot or not.
  4282   if (ProfileInterpreter) {
  4283 	__ b(done);
  4284 	__ delayed()->nop();
  4285 	__ bind(is_null);
  4286 	__ profile_null_seen(T3);
  4287   } else {
  4288 	__ bind(is_null);
  4290   __ bind(done);
  4293 // i use T3 as cpool, T1 as tags, T2 as index
  4294 // object always in FSR, superklass in T3, subklass in T2
  4295 void TemplateTable::instanceof() {
  4296   transition(atos, itos);
  4297   Label done, is_null, ok_is_subtype, quicked, resolved;
  4299   __ beq(FSR, R0, is_null);
  4300   __ delayed()->nop();
  4302   // Get cpool & tags index
  4303   __ get_cpool_and_tags(T3, T1);
  4304   // get index
  4305   __ load_two_bytes_from_at_bcp(T2, AT, 1);
  4306   __ hswap(T2);
  4308   // See if bytecode has already been quicked
  4309   // quicked
  4310   __ daddu(AT, T1, T2);
  4311   __ lb(AT, AT, Array<u1>::base_offset_in_bytes());
  4312   __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
  4313   __ beq(AT, R0, quicked);
  4314   __ delayed()->nop();
  4316   // get superklass in T3
  4317   //__ move(TSR, FSR);
  4318   // sometimes S2 may be changed during the call, 
  4319   // be careful if you use TSR as a saving place
  4320   //__ push(FSR);
  4321   __ push(atos);
  4322   const Register thread = TREG;
  4323 #ifndef OPT_THREAD
  4324   __ get_thread(thread);
  4325 #endif
  4326   call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  4327   __ get_vm_result_2(T3, thread);
  4328   //__ lw(FSR, SP, 0);
  4329   __ pop_ptr(FSR);	
  4330   __ b(resolved);
  4331   __ delayed()->nop();
  4332   //__ move(FSR, TSR);
  4334   // get superklass in T3, subklass in T2
  4335   __ bind(quicked);
  4336   __ dsll(AT, T2, Address::times_8);
  4337   __ daddu(AT, T3, AT);
  4338   __ ld(T3, AT, sizeof(ConstantPool)); 
  4340   __ bind(resolved);
  4341   // get subklass in T2
  4342   //__ ld(T2, FSR, oopDesc::klass_offset_in_bytes());
  4343   //add for compressedoops
  4344   __ load_klass(T2, FSR);
  4346   // Superklass in T3.  Subklass in T2.
  4347   __ gen_subtype_check(T3, T2, ok_is_subtype);
  4348   // Come here on failure
  4349   __ b(done);
  4350   __ delayed(); __ move(FSR, R0);
  4352   // Come here on success
  4353   __ bind(ok_is_subtype);
  4354   __ move(FSR, 1);
  4356   // Collect counts on whether this test sees NULLs a lot or not.
  4357   if (ProfileInterpreter) {
  4358      __ beq(R0, R0, done);
  4359      __ nop();
  4360      __ bind(is_null);
  4361      __ profile_null_seen(T3);
  4362   } else {
  4363      __ bind(is_null);   // same as 'done'
  4365   __ bind(done);
  4366   // FSR = 0: obj == NULL or  obj is not an instanceof the specified klass
  4367   // FSR = 1: obj != NULL and obj is     an instanceof the specified klass
  4370 //--------------------------------------------------------
  4371 //--------------------------------------------
  4372 // Breakpoints
  4373 void TemplateTable::_breakpoint() {
  4375 	// Note: We get here even if we are single stepping.
  4376 	// jbug insists on setting breakpoints at every bytecode
  4377 	// even if we are in single step mode.
  4379 	transition(vtos, vtos);
  4381 	// get the unpatched byte code
  4382 	///__ get_method(ecx);
  4383 	///__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at)
  4384 	//, ecx, esi);
  4385 	///__ movl(ebx, eax);
  4386 	__ get_method(A1);
  4387 	__ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), 
  4388 			A1, BCP);
  4389 	__ move(Rnext, V0); // Jin: Rnext will be used in dispatch_only_normal
  4391 	// post the breakpoint event
  4392 	///__ get_method(ecx);
  4393 	///__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), ecx, esi);
  4394 	__ get_method(A1);
  4395 	__ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), A1, BCP);
  4397 	// complete the execution of original bytecode
  4398 	__ dispatch_only_normal(vtos);
  4401 //----------------------------------------------------------------------------------------------------
  4402 // Exceptions
  4404 void TemplateTable::athrow() {
  4405 	transition(atos, vtos);
  4406 	__ null_check(FSR);
  4407 	__ jmp(Interpreter::throw_exception_entry());
  4408 	__ delayed()->nop();
  4411 //----------------------------------------------------------------------------------------------------
  4412 // Synchronization
  4413 //
  4414 // Note: monitorenter & exit are symmetric routines; which is reflected
  4415 //       in the assembly code structure as well
  4416 //
  4417 // Stack layout:
  4418 //
  4419 // [expressions  ] <--- SP               = expression stack top
  4420 // ..
  4421 // [expressions  ]
  4422 // [monitor entry] <--- monitor block top = expression stack bot
  4423 // ..
  4424 // [monitor entry]
  4425 // [frame data   ] <--- monitor block bot
  4426 // ...
  4427 // [return addr  ] <--- FP
  4429 // we use T2 as monitor entry pointer, T3 as monitor top pointer, c_rarg0 as free slot pointer
  4430 // object always in FSR
  4431 void TemplateTable::monitorenter() {
  4432   transition(atos, vtos);
  4433   // check for NULL object
  4434   __ null_check(FSR);
  4436   const Address monitor_block_top(FP, frame::interpreter_frame_monitor_block_top_offset 
  4437       * wordSize);
  4438   const int entry_size = (frame::interpreter_frame_monitor_size()* wordSize);
  4439   Label allocated;
  4441   // initialize entry pointer
  4442   __ move(c_rarg0, R0);
  4444   // find a free slot in the monitor block (result in c_rarg0)
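         // Scan from the monitor block top towards the initial SP: remember a free
         // slot (one whose obj field is null) in c_rarg0, and stop early if an entry
         // already holds FSR, i.e. the object is already locked in this frame.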
  4446     Label entry, loop, exit, next;
  4447     __ ld(T2, monitor_block_top);
  4448     __ b(entry);
  4449     __ delayed()->daddi(T3, FP, frame::interpreter_frame_initial_sp_offset * wordSize);
  4451     // free slot?
  4452     __ bind(loop);
  4453     __ ld(AT, T2, BasicObjectLock::obj_offset_in_bytes());
  4454     __ bne(AT, R0, next);
  4455     __ delayed()->nop();
  4456     __ move(c_rarg0, T2);
  4458     __ bind(next);
  4459     __ beq(FSR, AT, exit);
  4460     __ delayed()->nop();
  4461     __ daddi(T2, T2, entry_size);
  4463     __ bind(entry);
  4464     __ bne(T3, T2, loop);
  4465     __ delayed()->nop();
  4466     __ bind(exit);
  4469   __ bne(c_rarg0, R0, allocated);
  4470   __ delayed()->nop();
  4472   // allocate one if there's no free slot
  4474     Label entry, loop;
  4475     // 1. compute new pointers                   // SP: old expression stack top
  4476     __ ld(c_rarg0, monitor_block_top);
  4477     __ daddi(SP, SP, - entry_size);
  4478     __ daddi(c_rarg0, c_rarg0, - entry_size);
  4479     __ sd(c_rarg0, monitor_block_top);
  4480     __ b(entry);
  4481     __ delayed(); __ move(T3, SP);
  4483     // 2. move expression stack contents
  4484     __ bind(loop);
  4485     __ ld(AT, T3, entry_size);
  4486     __ sd(AT, T3, 0);
  4487     __ daddi(T3, T3, wordSize); 
  4488     __ bind(entry);
  4489     __ bne(T3, c_rarg0, loop);
  4490     __ delayed()->nop();
  4493   __ bind(allocated);
  4494   // Increment bcp to point to the next bytecode,
  4495   // so exception handling for async exceptions works correctly.
  4496   // The object has already been popped from the stack, so the
  4497   // expression stack looks correct.
  4498   __ daddi(BCP, BCP, 1); 
  4499   __ sd(FSR, c_rarg0, BasicObjectLock::obj_offset_in_bytes());
  4500   __ lock_object(c_rarg0);
  4501   // check to make sure this monitor doesn't cause stack overflow after locking
  4502   __ save_bcp();  // in case of exception
  4503   __ generate_stack_overflow_check(0);
  4504   // The bcp has already been incremented. Just need to dispatch to next instruction.
  4506   __ dispatch_next(vtos);
  4509 // T2 : top
  4510 // c_rarg0 : entry
  4511 void TemplateTable::monitorexit() {
  4512   transition(atos, vtos);
  4514   __ null_check(FSR);
  4516   const int entry_size =(frame::interpreter_frame_monitor_size()* wordSize);
  4517   Label found;
  4519   // find matching slot
  4521     Label entry, loop;
  4522     __ ld(c_rarg0, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  4523     __ b(entry);
  4524     __ delayed()->daddiu(T2, FP, frame::interpreter_frame_initial_sp_offset * wordSize);
  4526     __ bind(loop);
  4527     __ ld(AT, c_rarg0, BasicObjectLock::obj_offset_in_bytes());
  4528     __ beq(FSR, AT, found);
  4529     __ delayed()->nop();
  4530     __ daddiu(c_rarg0, c_rarg0, entry_size);
  4531     __ bind(entry);
  4532     __ bne(T2, c_rarg0, loop);
  4533     __ delayed()->nop();
  4536   // error handling. Unlocking was not block-structured
  4537   Label end;
  4538   __ call_VM(NOREG, CAST_FROM_FN_PTR(address, 
  4539 	InterpreterRuntime::throw_illegal_monitor_state_exception));
  4540   __ should_not_reach_here();
  4542   // call run-time routine
  4543   // c_rarg0: points to monitor entry
  4544   __ bind(found);
  4545   __ move(TSR, FSR);
  4546   __ unlock_object(c_rarg0);
  4547   __ move(FSR, TSR);
  4548   __ bind(end);
  4551 //--------------------------------------------------------------------------------------------------
  4552 // Wide instructions
  4553 void TemplateTable::wide() {
  4554   transition(vtos, vtos);
  4555   // Note: the BCP increment step is part of the individual wide bytecode implementations
  4556   __ lbu(Rnext, at_bcp(1));
  4557   __ dsll(T9, Rnext, Address::times_8);
  4558   __ li(AT, (long)Interpreter::_wentry_point);
  4559   __ dadd(AT, T9, AT);
  4560   __ ld(T9, AT, 0);
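         // T9 = Interpreter::_wentry_point[Rnext]: dispatch to the wide variant of
         // the following bytecode's template.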
  4561   __ jr(T9);
  4562   __ delayed()->nop();
  4565 //--------------------------------------------------------------------------------------------------
  4566 // Multi arrays
  4567 void TemplateTable::multianewarray() {
  4568   transition(vtos, atos);
  4569   // last dim is on top of stack; we want address of first one:
  4570   // first_addr = last_addr + (ndims - 1) * wordSize
  4571   __ lbu(A1, at_bcp(3));	// dimension
  4572   __ daddi(A1, A1, -1);	
  4573   __ dsll(A1, A1, Address::times_8);
  4574   __ dadd(A1, SP, A1);		// now A1 points to the count array on the stack
  4575   call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), A1);
  4576   __ lbu(AT, at_bcp(3));
  4577   __ dsll(AT, AT, Address::times_8);
  4578   __ dadd(SP, SP, AT);
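         // Pop the ndims dimension words (ndims * wordSize bytes) that
         // multianewarray consumed from the expression stack.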
  4581 #endif // !CC_INTERP
