src/cpu/ppc/vm/macroAssembler_ppc.hpp

author:      goetz
date:        Wed, 11 Dec 2013 00:06:11 +0100
changeset:   6495:67fa91961822
parent:      6477:eb178e97560c
child:       6511:31e80afe3fed
permissions: -rw-r--r--

8029940: PPC64 (part 122): C2 compiler port
Reviewed-by: kvn

/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by a few frequently used macros.

class ciTypeArray;

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
  inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
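
  // Example (illustrative, not part of the upstream file): splitting a 31-bit
  // offset so that (hi << 16) + lo reproduces it, with both halves fitting into
  // signed 16-bit immediates (e.g. for an addis/ld pair):
  //   largeoffset_si16_si16_hi(0x1234ABCD) == 0x1235   // rounded up, low half is negative
  //   largeoffset_si16_si16_lo(0x1234ABCD) == -0x5433  // 0x1234ABCD - (0x1235 << 16)
  //   (0x1235 << 16) + (-0x5433) == 0x1234ABCD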

  // load d = *[a+si31]
  // Emits several instructions if the offset is not encodable in one instruction.
  void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
  void ld_largeoffset          (Register d, int si31, Register a, int emit_filler_nop);
  inline static bool is_ld_largeoffset(address a);
  inline static int get_ld_largeoffset_offset(address a);

  inline void round_to(Register r, int modulus);

  // Load/store with type given by parameter.
  void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);

  // Move register if destination register and target register are different
  inline void mr_if_needed(Register rd, Register rs);
  inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
  // This is dedicated to emitting scheduled mach nodes. It is kept here for
  // better readability of the ad file.
  // Endgroups are not needed if
  //  - the scheduler is off
  //  - the scheduler found that there is a natural group end, in that
  //    case it reduced the size of the instruction used in the test
  //    yielding 'needed'.
  inline void endgroup_if_needed(bool needed);

  // Memory barriers.
  inline void membar(int bits);
  inline void release();
  inline void acquire();
  inline void fence();

  // nop padding
  void align(int modulus, int max = 252, int rem = 0);

  //
  // Constants, loading constants, TOC support
  //

  // Address of the global TOC.
  inline static address global_toc();
  // Offset of given address to the global TOC.
  inline static int offset_to_global_toc(const address addr);

  // Address of TOC of the current method.
  inline address method_toc();
  // Offset of given address to TOC of the current method.
  inline int offset_to_method_toc(const address addr);

  // Global TOC.
  void calculate_address_from_global_toc(Register dst, address addr,
                                         bool hi16 = true, bool lo16 = true,
                                         bool add_relocation = true, bool emit_dummy_addr = false);
  inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, true, false);
  };
  inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
    calculate_address_from_global_toc(dst, addr, false, true);
  };

  inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
  static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);

#ifdef _LP64
  // Patch narrow oop constant.
  inline static bool is_set_narrow_oop(address a, address bound);
  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
  static narrowOop get_narrow_oop(address a, address bound);
#endif

  inline static bool is_load_const_at(address a);

  // Emits an oop const to the constant pool, loads the constant, and
  // sets a relocation info with address current_pc.
  void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
  void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
    assert(dst == R2_TOC, "base register must be TOC");
    load_const_from_method_toc(dst, a, toc);
  }

  static bool is_load_const_from_method_toc_at(address a);
  static int get_offset_of_load_const_from_method_toc_at(address a);

  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);

  // Patch the 64 bit constant of a `load_const' sequence. This is a
  // low level procedure. It neither flushes the instruction cache nor
  // is it atomic.
  static void patch_const(address load_const, long x);
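
  // Patching sketch (illustrative; load_const_pc, new_value and
  // sequence_size_in_bytes are placeholders, not upstream API): since
  // patch_const() is neither atomic nor cache-flushing, a caller is expected
  // to handle both aspects itself, e.g.:
  //   MacroAssembler::patch_const(load_const_pc, (long)new_value);
  //   ICache::invalidate_range(load_const_pc, sequence_size_in_bytes);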

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  // Oops used directly in compiled code are stored in the constant pool,
  // and loaded from there.
  // Allocate new entry for oop in constant pool. Generate relocation.
  AddressLiteral allocate_oop_address(jobject obj);
  // Find oop obj in constant pool. Return relocation with its index.
  AddressLiteral constant_oop_address(jobject obj);

  // Find oop in constant pool and emit instructions to load it.
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Same as load_address.
  inline void set_oop         (AddressLiteral obj_addr, Register d);

  // Read runtime constant:  Issue load if constant not yet established,
  // else use real constant.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  //
  // branch, jump
  //

  inline void pd_patch_instruction(address branch, address target);
  NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)

  // Conditional far branch for destinations encodable in 24+2 bits.
  // Same interface as bc, e.g. no inverse boint-field.
  enum {
    bc_far_optimize_not         = 0,
    bc_far_optimize_on_relocate = 1
  };
  // optimize: flag for telling the conditional far branch to optimize
  //           itself when relocated.
  void bc_far(int boint, int biint, Label& dest, int optimize);
  // Relocation of conditional far branches.
  static bool    is_bc_far_at(address instruction_addr);
  static address get_dest_of_bc_far_at(address instruction_addr);
  static void    set_dest_of_bc_far_at(address instruction_addr, address dest);
 private:
  static bool inline is_bc_far_variant1_at(address instruction_addr);
  static bool inline is_bc_far_variant2_at(address instruction_addr);
  static bool inline is_bc_far_variant3_at(address instruction_addr);
 public:

  // Convenience bc_far versions.
  inline void blt_far(ConditionRegister crx, Label& L, int optimize);
  inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
  inline void beq_far(ConditionRegister crx, Label& L, int optimize);
  inline void bso_far(ConditionRegister crx, Label& L, int optimize);
  inline void bge_far(ConditionRegister crx, Label& L, int optimize);
  inline void ble_far(ConditionRegister crx, Label& L, int optimize);
  inline void bne_far(ConditionRegister crx, Label& L, int optimize);
  inline void bns_far(ConditionRegister crx, Label& L, int optimize);
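
  // Usage sketch (illustrative; CCR0 and L_target are placeholders): emit a
  // conditional branch whose target may lie outside the +/-32 KB reach of a
  // short bc, but let it shrink itself back to a plain bc when relocation
  // shows the destination is near enough:
  //   __ beq_far(CCR0, L_target, MacroAssembler::bc_far_optimize_on_relocate);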

  // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
 private:
  enum {
    bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
    bxx64_patchable_size              = bxx64_patchable_instruction_count * BytesPerInstWord,
    bxx64_patchable_ret_addr_offset   = bxx64_patchable_size
  };
  void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
  static bool is_bxx64_patchable_at(            address instruction_addr, bool link);
  // Does the instruction use a pc-relative encoding of the destination?
  static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant1_at(   address instruction_addr, bool link);
  // Load destination relative to global toc.
  static bool is_bxx64_patchable_variant1b_at(  address instruction_addr, bool link);
  static bool is_bxx64_patchable_variant2_at(   address instruction_addr, bool link);
  static void set_dest_of_bxx64_patchable_at(   address instruction_addr, address target, bool link);
  static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);

 public:
  // call
  enum {
    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
    bl64_patchable_size              = bxx64_patchable_size,
    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
  };
  inline void bl64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/true);
  }
  inline static bool is_bl64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
  }
  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
  }
  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
  }
  // jump
  enum {
    b64_patchable_instruction_count = bxx64_patchable_instruction_count,
    b64_patchable_size              = bxx64_patchable_size,
  };
  inline void b64_patchable(address target, relocInfo::relocType rt) {
    bxx64_patchable(target, rt, /*link=*/false);
  }
  inline static bool is_b64_patchable_at(address instruction_addr) {
    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
  }
  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
  }
  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
  }
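
  // Usage sketch (illustrative; initial_target, call_site and new_target are
  // placeholders): emit a patchable 64-bit call and re-point it later. As noted
  // above, the sequence is not MT-safe to patch while it may be executing.
  //   __ bl64_patchable(initial_target, relocInfo::runtime_call_type);
  //   ...
  //   if (MacroAssembler::is_bl64_patchable_at(call_site)) {
  //     MacroAssembler::set_dest_of_bl64_patchable_at(call_site, new_target);
  //   }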

  //
  // Support for frame handling
  //

  // some ABI-related functions
  void save_nonvolatile_gprs(   Register dst_base, int offset);
  void restore_nonvolatile_gprs(Register src_base, int offset);
  void save_volatile_gprs(   Register dst_base, int offset);
  void restore_volatile_gprs(Register src_base, int offset);
  void save_LR_CR(   Register tmp);     // tmp contains LR on return.
  void restore_LR_CR(Register tmp);

  // Get current PC using bl-next-instruction trick.
  address get_PC_trash_LR(Register result);

  // Resize current frame either relative to the current SP or absolute.
  void resize_frame(Register offset, Register tmp);
  void resize_frame(int      offset, Register tmp);
  void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);

  // Push a frame of size bytes.
  void push_frame(Register bytes, Register tmp);

  // Push a frame of size `bytes'. No abi space provided.
  void push_frame(unsigned int bytes, Register tmp);

  // Push a frame of size `bytes' plus abi112 on top.
  void push_frame_abi112(unsigned int bytes, Register tmp);

  // Set up a new C frame with a spill area for non-volatile GPRs and additional
  // space for local variables.
  void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);

  // pop current C frame
  void pop_frame();
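
  // Frame-handling sketch (illustrative; the register choices and fd are
  // placeholders, not prescribed by this class): wrap a C call in an
  // ABI-compliant frame.
  //   __ save_LR_CR(R0);                      // keep LR/CR across the call
  //   __ push_frame_abi112(0, R0);            // minimal frame with abi112 header
  //   __ call_c(fd, relocInfo::none);         // fd: some FunctionDescriptor*
  //   __ pop_frame();
  //   __ restore_LR_CR(R0);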

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

  // Generic version of a call to C function via a function descriptor
  // with variable support for C calling conventions (TOC, ENV, etc.).
  // Updates and returns _last_calls_return_pc.
  address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                    bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);

 public:

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

  // Call a C function via a function descriptor and use full C
  // calling conventions. Updates and returns _last_calls_return_pc.
  address call_c(Register function_descriptor);
  // For tail calls: only branch, don't link, so callee returns to caller of this function.
  address call_c_and_return_to_caller(Register function_descriptor);
  address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
  address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
                           Register toc);

 protected:

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).
  //
  // If no last_java_sp is specified (noreg) then SP will be used instead.
  virtual void call_VM_base(
     // where an oop-result ends up if any; use noreg otherwise
    Register        oop_result,
    // to set up last_Java_frame in stubs; use noreg otherwise
    Register        last_java_sp,
    // the entry point
    address         entry_point,
    // flag which indicates if exception should be checked
    bool            check_exception = true
  );

  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);

 public:
  // Call into the VM.
  // Passes the thread pointer (in R3_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
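
  // Usage sketch (illustrative; entry_point is a placeholder for the address of
  // an ENTRY routine): call into the VM with one argument and an oop result;
  // R16_thread is prepended automatically as described above.
  //   __ call_VM(R3_RET, entry_point, R4_ARG2);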

  // Call a stub function via a function descriptor, but don't save
  // TOC before the call, don't set up TOC and ENV for the call, and don't
  // restore TOC after the call. Updates and returns _last_calls_return_pc.
  inline address call_stub(Register function_entry);
  inline void call_stub_and_return_to(Register function_entry, Register return_pc);

  //
  // Java utilities
  //

  // Read from the polling page, its address is already in a register.
  inline void load_from_polling_page(Register polling_page_address, int offset = 0);
  // Check whether instruction is a read access to the polling page
  // which was emitted by load_from_polling_page(..).
  static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
                                        address* polling_address_ptr = NULL);

  // Check whether instruction is a write access to the memory
  // serialization page realized by one of the instructions stw, stwu,
  // stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // If instruction is a stack bang of the form ld, stdu, or
  // stdux, return the banged address. Otherwise, return 0.
  static address get_stack_bang_address(int instruction, void* ucontext);

  // Atomics
  // CmpxchgX sets condition register to cmpX(current, compare).
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
  // The stxcx will probably not be succeeded by a releasing store.
  static inline bool cmpxchgx_hint_release_lock()  { return false; }
  static inline bool cmpxchgx_hint_atomic_update() { return false; }

  // Cmpxchg semantics
  enum {
    MemBarNone = 0,
    MemBarRel  = 1,
    MemBarAcq  = 2,
    MemBarFenceAfter = 4 // use powers of 2
  };
  void cmpxchgw(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, bool contention_hint = false);
  void cmpxchgd(ConditionRegister flag,
                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
                int semantics, bool cmpxchgx_hint = false,
                Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
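
  // Usage sketch (illustrative; register names are placeholders): a 64-bit CAS
  // with release semantics before and acquire semantics after the exchange.
  // Afterwards the condition register holds cmpd(current, compare):
  // eq => swapped, ne => not swapped (see the CmpxchgX comment above).
  //   __ cmpxchgd(CCR0, Rcurrent, Rcompare, Rnew, Raddr,
  //               MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
  //               MacroAssembler::cmpxchgx_hint_atomic_update());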

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label& L_success,
                                     Label& L_failure);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Register temp2_reg,
                                     Label* L_success = NULL,
                                     Register result_reg = noreg);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);
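
  // Pairing sketch (illustrative; registers and labels are placeholders) for
  // cases where the combined check_klass_subtype() is not flexible enough:
  //   Label L_ok, L_fail;
  //   __ check_klass_subtype_fast_path(Rsub, Rsuper, Rtmp1, Rtmp2, L_ok, L_fail);
  //   // falling through here means "maybe a subtype": finish with the slow path
  //   __ check_klass_subtype_slow_path(Rsub, Rsuper, Rtmp1, Rtmp2, &L_ok);
  //   __ bind(L_fail);   // slow path fell through: not a subtype
  //   ...
  //   __ bind(L_ok);     // is a subtype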

  // Method handle support (JSR 292).
  void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);

  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);

  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(  ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // GC barrier support.
  void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);

#ifndef SERIALGC
  // General G1 pre-barrier generator.
  void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                            Register Rtmp1, Register Rtmp2, bool needs_frame = false);
  // General G1 post-barrier generator
  void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                             Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif // SERIALGC

  // Support for managing the JavaThread pointer (i.e., the reference to
  // thread-local information).

  // Support for last Java frame (but use call_VM instead where possible):
  // access R16_thread->last_Java_sp.
  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  void reset_last_Java_frame(void);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);

  // Read vm result from thread: oop_result = R16_thread->result;
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register metadata_result);

  static bool needs_explicit_null_check(intptr_t offset);

  // Trap-instruction-based checks.
  // Range checks can be distinguished from zero checks as they check 32 bits (tw, twi),
  // while zero checks check all 64 bits (td, tdi).
  inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
  static bool is_trap_null_check(int x) {
    return is_tdi(x, traptoEqual,               -1/*any reg*/, 0) ||
           is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
  }

  inline void trap_zombie_not_entrant();
  static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }

  inline void trap_should_not_reach_here();
  static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }

  inline void trap_ic_miss_check(Register a, Register b);
  static bool is_trap_ic_miss_check(int x) {
    return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
  }

  // Implicit or explicit null check, jumps to static address exception_entry.
  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);

  // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
  inline void load_with_trap_null_check(Register d, int si16, Register s1);

  // Load heap oop and decompress. Loaded oop may not be null.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  inline void encode_heap_oop_not_null(Register d);
  inline void decode_heap_oop_not_null(Register d);

  // Null allowed.
  inline void decode_heap_oop(Register d);

  // Load/Store klass oop from klass field. Compress.
  void load_klass(Register dst, Register src);
  void load_klass_with_trap_null_check(Register dst, Register src);
  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
  static int instr_size_for_decode_klass_not_null();
  void decode_klass_not_null(Register dst, Register src = noreg);
  void encode_klass_not_null(Register dst, Register src = noreg);

  // Load common heap base into register.
  void reinit_heapbase(Register d, Register tmp = noreg);

  // SIGTRAP-based range checks for arrays.
  inline void trap_range_check_l(Register a, Register b);
  inline void trap_range_check_l(Register a, int si16);
  static bool is_trap_range_check_l(int x) {
    return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
  }
  inline void trap_range_check_le(Register a, int si16);
  static bool is_trap_range_check_le(int x) {
    return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_g(Register a, int si16);
  static bool is_trap_range_check_g(int x) {
    return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
  }
  inline void trap_range_check_ge(Register a, Register b);
  inline void trap_range_check_ge(Register a, int si16);
  static bool is_trap_range_check_ge(int x) {
    return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
            is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
  }
  static bool is_trap_range_check(int x) {
    return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
           is_trap_range_check_g(x) || is_trap_range_check_ge(x);
  }
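
  // Usage sketch (illustrative; Rindex, Rlength and Rarray are placeholders): a
  // SIGTRAP-based array bounds check that traps when index >= length; the signal
  // handler can recognize the trap via is_trap_range_check().
  //   __ lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
  //   __ trap_range_check_ge(Rindex, Rlength);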

  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);

  // Needle of length 1.
  void string_indexof_1(Register result, Register haystack, Register haycnt,
                        Register needle, jchar needleChar,
                        Register tmp1, Register tmp2);
  // General indexof, optionally with a constant needle length.
  void string_indexof(Register result, Register haystack, Register haycnt,
                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                      Register result_reg, Register tmp_reg);
  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
                          Register tmp5_reg);
  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                             Register tmp1_reg, Register tmp2_reg);

  //
  // Debugging
  //

  // assert on cr0
  void asm_assert(bool check_equal, const char* msg, int id);
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

 private:
  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
                            const char* msg, int id);

 public:

  void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
  }

  // Verify R16_thread contents.
  void verify_thread();

  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:

  enum {
    stop_stop                = 0,
    stop_untested            = 1,
    stop_unimplemented       = 2,
    stop_shouldnotreachhere  = 3,
    stop_end                 = 4
  };
  void stop(int type, const char* msg, int id);

 public:
  // Prints msg, dumps registers and stops execution.
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere,  "", -1); }

  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
