src/cpu/mips/vm/macroAssembler_mips.hpp

Fri, 27 Sep 2019 11:31:13 +0800

author
huangjia
date
Fri, 27 Sep 2019 11:31:13 +0800
changeset 9705
0b27fc8adf1b
parent 9576
1cee9b02d46f
child 9932
86ea9a02a717
permissions
-rw-r--r--

#10071 MIPS Port of 8176100: [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
Summary: fixes crashes in runtime/jni/CallWithJNIWeak/test.sh and runtime/jni/ReturnJNIWeak/test.sh
Reviewed-by: aoqi

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
     4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5  *
     6  * This code is free software; you can redistribute it and/or modify it
     7  * under the terms of the GNU General Public License version 2 only, as
     8  * published by the Free Software Foundation.
     9  *
    10  * This code is distributed in the hope that it will be useful, but WITHOUT
    11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    13  * version 2 for more details (a copy is included in the LICENSE file that
    14  * accompanied this code).
    15  *
    16  * You should have received a copy of the GNU General Public License version
    17  * 2 along with this work; if not, write to the Free Software Foundation,
    18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    19  *
    20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    21  * or visit www.oracle.com if you need additional information or have any
    22  * questions.
    23  *
    24  */
    26 #ifndef CPU_MIPS_VM_MACROASSEMBLER_MIPS_HPP
    27 #define CPU_MIPS_VM_MACROASSEMBLER_MIPS_HPP
    29 #include "asm/assembler.hpp"
    30 #include "utilities/macros.hpp"
    31 #include "runtime/rtmLocking.hpp"
    34 // MacroAssembler extends Assembler by frequently used macros.
    35 //
    36 // Instructions for which a 'better' code sequence exists depending
    37 // on arguments should also go in here.
    39 class MacroAssembler: public Assembler {
    40   friend class LIR_Assembler;
    41   friend class Runtime1;      // as_Address()
    43  protected:
    45   Address as_Address(AddressLiteral adr);
    46   Address as_Address(ArrayAddress adr);
    48   // Support for VM calls
    49   //
    50   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
    51   // may customize this version by overriding it for its purposes (e.g., to save/restore
    52   // additional registers when doing a VM call).
    53 #ifdef CC_INTERP
    54   // c++ interpreter never wants to use interp_masm version of call_VM
    55   #define VIRTUAL
    56 #else
    57   #define VIRTUAL virtual
    58 #endif
    60   VIRTUAL void call_VM_leaf_base(
    61     address entry_point,               // the entry point
    62     int     number_of_arguments        // the number of arguments to pop after the call
    63   );
    65   // This is the base routine called by the different versions of call_VM. The interpreter
    66   // may customize this version by overriding it for its purposes (e.g., to save/restore
    67   // additional registers when doing a VM call).
    68   //
    69   // If no java_thread register is specified (noreg) than TREG will be used instead. call_VM_base
    70   // returns the register which contains the thread upon return. If a thread register has been
    71   // specified, the return value will correspond to that register. If no last_java_sp is specified
    72   // (noreg) than sp will be used instead.
    73   VIRTUAL void call_VM_base(           // returns the register containing the thread upon return
    74     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    75     Register java_thread,              // the thread if computed before     ; use noreg otherwise
    76     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    77     address  entry_point,              // the entry point
    78     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    79     bool     check_exceptions          // whether to check for pending exceptions after return
    80   );
    82   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
    83   // The implementation is only non-empty for the InterpreterMacroAssembler,
    84   // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
    85   virtual void check_and_handle_popframe(Register java_thread);
    86   virtual void check_and_handle_earlyret(Register java_thread);
    88   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
    90   // helpers for FPU flag access
    91   // tmp is a temporary register, if none is available use noreg
    93  public:
    94   static intptr_t  i[32];
    95   static float  f[32];
    96   static void print(outputStream *s);
    98   static int i_offset(unsigned int k);
    99   static int f_offset(unsigned int k);
   101   static void save_registers(MacroAssembler *masm);
   102   static void restore_registers(MacroAssembler *masm);
   104   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
   106   // Support for NULL-checks
   107   //
   108   // Generates code that causes a NULL OS exception if the content of reg is NULL.
   109   // If the accessed location is M[reg + offset] and the offset is known, provide the
   110   // offset. No explicit code generation is needed if the offset is within a certain
   111   // range (0 <= offset <= page_size).
   113   void null_check(Register reg, int offset = -1);
   114   static bool needs_explicit_null_check(intptr_t offset);
   116   // Required platform-specific helpers for Label::patch_instructions.
   117   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   118   void pd_patch_instruction(address branch, address target);
   120   // Support for inc/dec with optimal instruction selection depending on value
   121   void incrementl(Register reg, int value = 1);
   122   void decrementl(Register reg, int value = 1);
   125   // Alignment
   126   void align(int modulus);
   129   // Stack frame creation/removal
   130   void enter();
   131   void leave();
   133   // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
   134   // The pointer will be loaded into the thread register.
   135   void get_thread(Register thread);
   138   // Support for VM calls
   139   //
   140   // It is imperative that all calls into the VM are handled via the call_VM macros.
   141   // They make sure that the stack linkage is setup correctly. call_VM's correspond
   142   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
   145   void call_VM(Register oop_result,
   146                address entry_point,
   147                bool check_exceptions = true);
   148   void call_VM(Register oop_result,
   149                address entry_point,
   150                Register arg_1,
   151                bool check_exceptions = true);
   152   void call_VM(Register oop_result,
   153                address entry_point,
   154                Register arg_1, Register arg_2,
   155                bool check_exceptions = true);
   156   void call_VM(Register oop_result,
   157                address entry_point,
   158                Register arg_1, Register arg_2, Register arg_3,
   159                bool check_exceptions = true);
   161   // Overloadings with last_Java_sp
   162   void call_VM(Register oop_result,
   163                Register last_java_sp,
   164                address entry_point,
   165                int number_of_arguments = 0,
   166                bool check_exceptions = true);
   167   void call_VM(Register oop_result,
   168                Register last_java_sp,
   169                address entry_point,
   170                Register arg_1, bool
   171                check_exceptions = true);
   172   void call_VM(Register oop_result,
   173                Register last_java_sp,
   174                address entry_point,
   175                Register arg_1, Register arg_2,
   176                bool check_exceptions = true);
   177   void call_VM(Register oop_result,
   178                Register last_java_sp,
   179                address entry_point,
   180                Register arg_1, Register arg_2, Register arg_3,
   181                bool check_exceptions = true);
   183   void get_vm_result  (Register oop_result, Register thread);
   184   void get_vm_result_2(Register metadata_result, Register thread);
   185   void call_VM_leaf(address entry_point,
   186                     int number_of_arguments = 0);
   187   void call_VM_leaf(address entry_point,
   188                     Register arg_1);
   189   void call_VM_leaf(address entry_point,
   190                     Register arg_1, Register arg_2);
   191   void call_VM_leaf(address entry_point,
   192                     Register arg_1, Register arg_2, Register arg_3);
   194   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
   195   void super_call_VM_leaf(address entry_point);
   196   void super_call_VM_leaf(address entry_point, Register arg_1);
   197   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
   198   void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
   200   // last Java Frame (fills frame anchor)
   201   void set_last_Java_frame(Register thread,
   202                            Register last_java_sp,
   203                            Register last_java_fp,
   204                            address last_java_pc);
   206   // thread in the default location (S6)
   207   void set_last_Java_frame(Register last_java_sp,
   208                            Register last_java_fp,
   209                            address last_java_pc);
   211   void reset_last_Java_frame(Register thread, bool clear_fp);
   213   // thread in the default location (S6)
   214   void reset_last_Java_frame(bool clear_fp);
   216   // Stores
   217   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
   218   void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)
   220  void resolve_jobject(Register value, Register thread, Register tmp);
   221  void clear_jweak_tag(Register possibly_jweak);
   223 #if INCLUDE_ALL_GCS
   225   void g1_write_barrier_pre(Register obj,
   226                             Register pre_val,
   227                             Register thread,
   228                             Register tmp,
   229                             bool tosca_live,
   230                             bool expand_call);
   232   void g1_write_barrier_post(Register store_addr,
   233                              Register new_val,
   234                              Register thread,
   235                              Register tmp,
   236                              Register tmp2);
   238 #endif // INCLUDE_ALL_GCS
   240   // split store_check(Register obj) to enhance instruction interleaving
   241   void store_check_part_1(Register obj);
   242   void store_check_part_2(Register obj);
   244   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
   245   void c2bool(Register x);
   246   //add for compressedoops
   247   void load_klass(Register dst, Register src);
   248   void store_klass(Register dst, Register src);
   249   void load_prototype_header(Register dst, Register src);
   251 #ifdef _LP64
   252   void store_klass_gap(Register dst, Register src);
   254   void load_heap_oop(Register dst, Address src);
   255   void store_heap_oop(Address dst, Register src);
   256   void store_heap_oop_null(Address dst);
   257   void encode_heap_oop(Register r);
   258   void encode_heap_oop(Register dst, Register src);
   259   void decode_heap_oop(Register r);
   260   void decode_heap_oop(Register dst, Register src);
   261   void encode_heap_oop_not_null(Register r);
   262   void decode_heap_oop_not_null(Register r);
   263   void encode_heap_oop_not_null(Register dst, Register src);
   264   void decode_heap_oop_not_null(Register dst, Register src);
   266   void encode_klass_not_null(Register r);
   267   void decode_klass_not_null(Register r);
   268   void encode_klass_not_null(Register dst, Register src);
   269   void decode_klass_not_null(Register dst, Register src);
   271   // Returns the byte size of the instructions generated by decode_klass_not_null()
   272   // when compressed klass pointers are being used.
   273   static int instr_size_for_decode_klass_not_null();
   275   // if heap base register is used - reinit it with the correct value
   276   void reinit_heapbase();
   278   DEBUG_ONLY(void verify_heapbase(const char* msg);)
   280   void set_narrow_klass(Register dst, Klass* k);
   281   void set_narrow_oop(Register dst, jobject obj);
   283 #endif // _LP64
   287   void int3();
   288   // Sign extension
   289 #ifdef _LP64
   290   void sign_extend_short(Register reg)   { /*dsll32(reg, reg, 16); dsra32(reg, reg, 16);*/ seh(reg, reg); }
   291   void sign_extend_byte(Register reg)  { /*dsll32(reg, reg, 24); dsra32(reg, reg, 24);*/ seb(reg, reg); }
   292 #else
   293   void sign_extend_short(Register reg)   { /*sll(reg, reg, 16); sra(reg, reg, 16);*/ seh(reg, reg); }
   294   void sign_extend_byte(Register reg)  { /*sll(reg, reg, 24); sra(reg, reg, 24);*/ seb(reg, reg);}
   295 #endif
   296   void rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp);
   297   void rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp);
   299   void trigfunc(char trig, int num_fpu_regs_in_use = 1);
   300   // allocation
   301   void eden_allocate(
   302     Register obj,                      // result: pointer to object after successful allocation
   303     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
   304     int      con_size_in_bytes,        // object size in bytes if   known at compile time
   305     Register t1,                       // temp register
   306     Register t2,
   307     Label&   slow_case                 // continuation point if fast allocation fails
   308   );
   309   void tlab_allocate(
   310     Register obj,                      // result: pointer to object after successful allocation
   311     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
   312     int      con_size_in_bytes,        // object size in bytes if   known at compile time
   313     Register t1,                       // temp register
   314     Register t2,                       // temp register
   315     Label&   slow_case                 // continuation point if fast allocation fails
   316   );
   317   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
   318   void incr_allocated_bytes(Register thread,
   319                             Register var_size_in_bytes, int con_size_in_bytes,
   320                             Register t1 = noreg);
   321   // interface method calling
   322   void lookup_interface_method(Register recv_klass,
   323                                Register intf_klass,
   324                                RegisterOrConstant itable_index,
   325                                Register method_result,
   326                                Register scan_temp,
   327                                Label& no_such_interface,
   328                                bool return_method = true);
   330   // virtual method calling
   331   void lookup_virtual_method(Register recv_klass,
   332                              RegisterOrConstant vtable_index,
   333                              Register method_result);
   335   // Test sub_klass against super_klass, with fast and slow paths.
   337   // The fast path produces a tri-state answer: yes / no / maybe-slow.
   338   // One of the three labels can be NULL, meaning take the fall-through.
   339   // If super_check_offset is -1, the value is loaded up from super_klass.
   340   // No registers are killed, except temp_reg.
   341   void check_klass_subtype_fast_path(Register sub_klass,
   342                                      Register super_klass,
   343                                      Register temp_reg,
   344                                      Label* L_success,
   345                                      Label* L_failure,
   346                                      Label* L_slow_path,
   347                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
   349   // The rest of the type check; must be wired to a corresponding fast path.
   350   // It does not repeat the fast path logic, so don't use it standalone.
   351   // The temp_reg and temp2_reg can be noreg, if no temps are available.
   352   // Updates the sub's secondary super cache as necessary.
   353   // If set_cond_codes, condition codes will be Z on success, NZ on failure.
   354   void check_klass_subtype_slow_path(Register sub_klass,
   355                                      Register super_klass,
   356                                      Register temp_reg,
   357                                      Register temp2_reg,
   358                                      Label* L_success,
   359                                      Label* L_failure,
   360                                      bool set_cond_codes = false);
   362   // Simplified, combined version, good for typical uses.
   363   // Falls through on failure.
   364   void check_klass_subtype(Register sub_klass,
   365                            Register super_klass,
   366                            Register temp_reg,
   367                            Label& L_success);
   370   // Debugging
   372   // only if +VerifyOops
   373   void verify_oop(Register reg, const char* s = "broken oop");
   374   void verify_oop_addr(Address addr, const char * s = "broken oop addr");
   375   void verify_oop_subroutine();
   376   // TODO: verify method and klass metadata (compare against vptr?)
   377   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
   378   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
   380   #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
   381   #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
   383   // only if +VerifyFPU
   384   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
   386   // prints msg, dumps registers and stops execution
   387   void stop(const char* msg);
   389   // prints msg and continues
   390   void warn(const char* msg);
   392   static void debug(char* msg/*, RegistersForDebugging* regs*/);
   393   static void debug64(char* msg, int64_t pc, int64_t regs[]);
   395   void print_reg(Register reg);
   396   void print_reg(FloatRegister reg);
   397   //void os_breakpoint();
   399   void untested()                                { stop("untested"); }
   401   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, sizeof(b), "unimplemented: %s", what);  stop(b); }
   403   void should_not_reach_here()                   { stop("should not reach here"); }
   405   void print_CPU_state();
   407   // Stack overflow checking
   408   void bang_stack_with_offset(int offset) {
   409     // stack grows down, caller passes positive offset
   410     assert(offset > 0, "must bang with negative offset");
   411     if (offset <= 32768) {
   412       sw(A0, SP, -offset);
   413     } else {
   414 #ifdef _LP64
   415       li(AT, offset);
   416       dsub(AT, SP, AT);
   417 #else
   418       move(AT, offset);
   419       sub(AT, SP, AT);
   420 #endif
   421       sw(A0, AT, 0);
   422     }
   423   }
   425   // Writes to stack successive pages until offset reached to check for
   426   // stack overflow + shadow pages.  Also, clobbers tmp
   427   void bang_stack_size(Register size, Register tmp);
   429   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
   430                                                 Register tmp,
   431                                                 int offset);
   433   // Support for serializing memory accesses between threads
   434   void serialize_memory(Register thread, Register tmp);
   436   //void verify_tlab();
   437   void verify_tlab(Register t1, Register t2);
   439   // Biased locking support
   440   // lock_reg and obj_reg must be loaded up with the appropriate values.
   441   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
   442   // be killed; if not supplied, push/pop will be used internally to
   443   // allocate a temporary (inefficient, avoid if possible).
   444   // Optional slow case is for implementations (interpreter and C1) which branch to
   445   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
   446   // Returns offset of first potentially-faulting instruction for null
   447   // check info (currently consumed only by C1). If
   448   // swap_reg_contains_mark is true then returns -1 as it is assumed
   449   // the calling code has already passed any potential faults.
   450   int biased_locking_enter(Register lock_reg, Register obj_reg,
   451                            Register swap_reg, Register tmp_reg,
   452                            bool swap_reg_contains_mark,
   453                            Label& done, Label* slow_case = NULL,
   454                            BiasedLockingCounters* counters = NULL);
   455   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
   456 #ifdef COMPILER2
   457   void fast_lock(Register obj, Register box, Register tmp, Register scr);
   458   void fast_unlock(Register obj, Register box, Register tmp);
   459 #endif
   462   // Arithmetics
   463   // Regular vs. d* versions
   464   inline void addu_long(Register rd, Register rs, Register rt) {
   465 #ifdef _LP64
   466     daddu(rd, rs, rt);
   467 #else
   468     addu(rd, rs, rt);
   469 #endif
   470   }
   471   inline void addu_long(Register rd, Register rs, long imm32_64) {
   472 #ifdef _LP64
   473     daddiu(rd, rs, imm32_64);
   474 #else
   475     addiu(rd, rs, imm32_64);
   476 #endif
   478   }
   480   void round_to(Register reg, int modulus) {
   481     assert_different_registers(reg, AT);
   482     increment(reg, modulus - 1);
   483     move(AT, - modulus);
   484     andr(reg, reg, AT);
   485   }
   487   // the follow two might use AT register, be sure you have no meanful data in AT before you call them
   488   void increment(Register reg, int imm);
   489   void decrement(Register reg, int imm);
   491 #ifdef _LP64
   492   void shl(Register reg, int sa)        { dsll(reg, reg, sa); }
   493   void shr(Register reg, int sa)        { dsrl(reg, reg, sa); }
   494   void sar(Register reg, int sa)        { dsra(reg, reg, sa); }
   495 #else
   496   void shl(Register reg, int sa)        { sll(reg, reg, sa); }
   497   void shr(Register reg, int sa)        { srl(reg, reg, sa); }
   498   void sar(Register reg, int sa)        { sra(reg, reg, sa); }
   499 #endif
   500   // Helper functions for statistics gathering.
   501   void atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2);
   503   // Calls
   504   void call(address entry);
   505   void call(address entry, relocInfo::relocType rtype);
   506   void call(address entry, RelocationHolder& rh);
   507   // Emit the CompiledIC call idiom
   508   void ic_call(address entry);
   510   // Jumps
   511   void jmp(address entry);
   512   void jmp(address entry, relocInfo::relocType rtype);
   513   void jmp_far(Label& L); // always long jumps
   515   /* branches may exceed 16-bit offset */
   516   void b_far(address entry);
   517   void b_far(Label& L);
   519   void bne_far    (Register rs, Register rt, address entry);
   520   void bne_far    (Register rs, Register rt, Label& L);
   522   void beq_far    (Register rs, Register rt, address entry);
   523   void beq_far    (Register rs, Register rt, Label& L);
   525   // For C2 to support long branches
   526   void beq_long   (Register rs, Register rt, Label& L);
   527   void bne_long   (Register rs, Register rt, Label& L);
   528   void bc1t_long  (Label& L);
   529   void bc1f_long  (Label& L);
   531   void patchable_call(address target);
   532   void general_call(address target);
   534   void patchable_jump(address target);
   535   void general_jump(address target);
   537   static int insts_for_patchable_call(address target);
   538   static int insts_for_general_call(address target);
   540   static int insts_for_patchable_jump(address target);
   541   static int insts_for_general_jump(address target);
   543   // Floating
   544   // Data
   546   // Argument ops
   547   inline void store_int_argument(Register s, Argument &a) {
   548     if(a.is_Register()) {
   549       move(a.as_Register(), s);
   550     } else {
   551       sw(s, a.as_caller_address());
   552     }
   553   }
   555   inline void store_long_argument(Register s, Argument &a) {
   556     Argument a1 = a.successor();
   557     if(a.is_Register() && a1.is_Register()) {
   558       move(a.as_Register(), s);
   559       move(a.as_Register(), s);
   560     } else {
   561       sd(s, a.as_caller_address());
   562     }
   563   }
   565   inline void store_float_argument(FloatRegister s, Argument &a) {
   566     if(a.is_Register()) {
   567       mov_s(a.as_FloatRegister(), s);
   568     } else {
   569       swc1(s, a.as_caller_address());
   570     }
   571   }
   572   inline void store_double_argument(FloatRegister s, Argument &a) {
   573     if(a.is_Register()) {
   574       mov_d(a.as_FloatRegister(), s);
   575     } else {
   576       sdc1(s, a.as_caller_address());
   577     }
   578   }
   580   inline void store_ptr_argument(Register s, Argument &a) {
   581     if(a.is_Register()) {
   582       move(a.as_Register(), s);
   583     } else {
   584       st_ptr(s, a.as_caller_address());
   585     }
   586   }
   588   // Load and store values by size and signed-ness
   589   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
   590   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
   592   // ld_ptr will perform lw for 32 bit VMs and ld for 64 bit VMs
   593   inline void ld_ptr(Register rt, Address a) {
   594 #ifdef _LP64
   595     ld(rt, a);
   596 #else
   597     lw(rt, a);
   598 #endif
   599   }
   601   inline void ld_ptr(Register rt, Register base, int offset16) {
   602 #ifdef _LP64
   603     ld(rt, base, offset16);
   604 #else
   605     lw(rt, base, offset16);
   606 #endif
   608   }
   610   // st_ptr will perform sw for 32 bit VMs and sd for 64 bit VMs
   611   inline void st_ptr(Register rt, Address a) {
   612 #ifdef _LP64
   613     sd(rt, a);
   614 #else
   615     sw(rt, a);
   616 #endif
   617   }
   619   inline void st_ptr(Register rt, Register base, int offset16) {
   620 #ifdef _LP64
   621     sd(rt, base, offset16);
   622 #else
   623     sw(rt, base, offset16);
   624 #endif
   625   }
   627   void ld_ptr(Register rt, Register offset, Register base);
   628   void st_ptr(Register rt, Register offset, Register base);
   630   // ld_long will perform lw for 32 bit VMs and ld for 64 bit VMs
   631   // st_long will perform sw for 32 bit VMs and sd for 64 bit VMs
   632   inline void ld_long(Register rt, Register base, int offset16);
   633   inline void st_long(Register rt, Register base, int offset16);
   634   inline void ld_long(Register rt, Address a);
   635   inline void st_long(Register rt, Address a);
   636   void ld_long(Register rt, Register offset, Register base);
   637   void st_long(Register rt, Register offset, Register base);
   639   // swap the two byte of the low 16-bit halfword
   640   // this directive will use AT, be sure the high 16-bit of reg is zero
   641   void hswap(Register reg);
   642   void huswap(Register reg);
   644   // convert big endian integer to little endian integer
   645   void swap(Register reg);
   647   // implement the x86 instruction semantic
   648   // if c_reg == *dest then *dest <= x_reg
   649   // else c_reg <= *dest
   650   // the AT indicate if xchg occurred, 1 for xchged, else  0
   651   void cmpxchg(Register x_reg, Address dest, Register c_reg);
   652 #ifdef _LP64
   653   void cmpxchg32(Register x_reg, Address dest, Register c_reg);
   654 #endif
   655   void cmpxchg8(Register x_regLo, Register x_regHi, Address dest, Register c_regLo, Register c_regHi);
   657   //pop & push, added by aoqi
   658 #ifdef _LP64
   659   void extend_sign(Register rh, Register rl) { stop("extend_sign"); }
   660   void neg(Register reg) { dsubu(reg, R0, reg); }
   661   void push (Register reg)      { sd  (reg, SP, -8); daddi(SP, SP, -8); }
   662   void push (FloatRegister reg) { sdc1(reg, SP, -8); daddi(SP, SP, -8); }
   663   void pop  (Register reg)      { ld  (reg, SP, 0);  daddi(SP, SP, 8); }
   664   void pop  (FloatRegister reg) { ldc1(reg, SP, 0);  daddi(SP, SP, 8); }
   665   void pop  ()                  { daddi(SP, SP, 8); }
   666   void pop2 ()                  { daddi(SP, SP, 16); }
   667 #else
   668   void extend_sign(Register rh, Register rl) { sra(rh, rl, 31); }
   669   void neg(Register reg) { subu(reg, R0, reg); }
   670   void push (Register reg)      { sw  (reg, SP, -4); addi(SP, SP, -4); }
   671   void push (FloatRegister reg) { swc1(reg, SP, -4); addi(SP, SP, -4); }
   672   void pop  (Register reg)      { lw  (reg, SP, 0);  addi(SP, SP, 4); }
   673   void pop  (FloatRegister reg) { lwc1(reg, SP, 0);  addi(SP, SP, 4); }
   674   void pop  ()                  { addi(SP, SP, 4); }
   675   void pop2 ()                  { addi(SP, SP, 8); }
   676 #endif
   677   void push2(Register reg1, Register reg2);
   678   void pop2 (Register reg1, Register reg2);
   679   void dpush (Register reg)     { sd  (reg, SP, -8); daddi(SP, SP, -8); }
   680   void dpop  (Register reg)     { ld  (reg, SP, 0);  daddi(SP, SP, 8); }
   681   //we need 2 fun to save and resotre general register
   682   void pushad();
   683   void popad();
   684   void pushad_except_v0();
   685   void popad_except_v0();
   687   //move an 32-bit immediate to Register
   688   void move(Register reg, int imm32)  { li32(reg, imm32); }
   689   void li  (Register rd, long imm);
   690   void li  (Register rd, address addr) { li(rd, (long)addr); }
   691   //replace move(Register reg, int imm)
   692   void li32(Register rd, int imm32); // sign-extends to 64 bits on mips64
   693 #ifdef _LP64
   694   void set64(Register d, jlong value);
   695   static int  insts_for_set64(jlong value);
   697   void patchable_set48(Register d, jlong value);
   698   void patchable_set32(Register d, jlong value);
   700   void patchable_call32(Register d, jlong value);
   702   static int call_size(address target, bool far, bool patchable);
   704   static bool reachable_from_cache(address target);
   707   void dli(Register rd, long imm) { li(rd, imm); }
   708   void li64(Register rd, long imm);
   709   void li48(Register rd, long imm);
   710 #endif
   712 #ifdef _LP64
   713   void move(Register rd, Register rs)   { dadd(rd, rs, R0); }
   714   void move_u32(Register rd, Register rs)   { addu32(rd, rs, R0); }
   715 #else
   716   void move(Register rd, Register rs)   { add(rd, rs, R0); }
   717 #endif
   718   void dmove(Register rd, Register rs)  { dadd(rd, rs, R0); }
   719   void mov_metadata(Register dst, Metadata* obj);
   720   void mov_metadata(Address dst, Metadata* obj);
   722   void store_for_type_by_register(Register src_reg,      Register tmp_reg, int disp, BasicType type, bool wide);
   723   void store_for_type_by_register(FloatRegister src_reg, Register tmp_reg, int disp, BasicType type);
   724   void store_for_type(Register src_reg,      Address addr, BasicType type = T_INT, bool wide = false);
   725   void store_for_type(FloatRegister src_reg, Address addr, BasicType type = T_INT);
   726   void load_for_type_by_register(Register dst_reg,      Register tmp_reg, int disp, BasicType type, bool wide);
   727   void load_for_type_by_register(FloatRegister dst_reg, Register tmp_reg, int disp, BasicType type);
   728   int load_for_type(Register dst_reg,      Address addr, BasicType type = T_INT, bool wide = false);
   729   int load_for_type(FloatRegister dst_reg, Address addr, BasicType type = T_INT);
   731 #ifndef PRODUCT
   732   static void pd_print_patched_instruction(address branch) {
   733     jint stub_inst = *(jint*) branch;
   734     print_instruction(stub_inst);
   735     ::tty->print("%s", " (unresolved)");
   737   }
   738 #endif
   740   //FIXME
   741   void empty_FPU_stack(){/*need implemented*/};
   744   // method handles (JSR 292)
   745   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
   747 #undef VIRTUAL
   749 };
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
   759 class SkipIfEqual {
   760  private:
   761   MacroAssembler* _masm;
   762   Label _label;
   764  public:
   765    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
   766    ~SkipIfEqual();
   767 };
#ifdef ASSERT
// Platform hook queried by AbstractAssembler in debug builds; returning true
// here presumably keeps instruction-mark verification enabled on MIPS --
// confirm against AbstractAssembler's use of this predicate.
inline bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif
   774 #endif // CPU_MIPS_VM_MACROASSEMBLER_MIPS_HPP

mercurial