src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp

changeset:   9852:70aa912cebe5
parent:      6876:710a3c8b516e
author:      aoqi
date:        Wed, 15 Apr 2020 11:49:55 +0800
summary:     Merge

/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool MacroAssembler::is_ld_largeoffset(address a) {
  const int inst1 = *(int *)a;
  const int inst2 = *(int *)(a+4);
  return (is_ld(inst1)) ||
         (is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
}

inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
  assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");

  const int inst1 = *(int *)a;
  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else {
    const int inst2 = *(int *)(a+4);
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
}
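
// Note (illustrative, not in the original source): the two patterns matched above are
//   ld    d, si16(a)                // single instruction, offset fits the 16-bit D field
// and
//   addis rt, ra, offset_hi         // materialize the upper 16 bits of the offset
//   ld    d,  offset_lo(rt)         // lower 16 bits via the D field, base is the addis target
// For the two-instruction form the full offset is reassembled above as
// (offset_hi << 16) + offset_lo, e.g. 0x0001 and 0x0008 give 0x10008 (assuming a
// non-negative low part, so no carry correction applies).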

inline void MacroAssembler::round_to(Register r, int modulus) {
  assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
  addi(r, r, modulus-1);
  clrrdi(r, r, log2_long((jlong)modulus));
}
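
// Example (illustrative, not in the original source): round_to(r, 8) with r == 13 emits
//   addi   r, r, 7    // r = 20
//   clrrdi r, r, 3    // clear the low log2(8) = 3 bits: r = 16, i.e. 13 rounded up to a multiple of 8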

// Move register if destination register and target register are different.
inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
  if (rs != rd) mr(rd, rs);
}
inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) fmr(rd, rs);
}
inline void MacroAssembler::endgroup_if_needed(bool needed) {
  if (needed) {
    endgroup();
  }
}

inline void MacroAssembler::membar(int bits) {
  // TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release
  // (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||))
  if (bits & StoreLoad) sync(); else lwsync();
}
inline void MacroAssembler::release() { membar(LoadStore | StoreStore); }
inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); }
inline void MacroAssembler::fence()   { membar(LoadLoad | LoadStore | StoreLoad | StoreStore); }
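
// Note (illustrative, not in the original source): on PPC64 only orderings that include
// StoreLoad require the heavyweight sync; lwsync covers LoadLoad, LoadStore and StoreStore.
// Hence release() and acquire() above emit lwsync, while fence() (which includes StoreLoad)
// emits sync.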

// Address of the global TOC.
inline address MacroAssembler::global_toc() {
  return CodeCache::low_bound();
}

// Offset of given address to the global TOC.
inline int MacroAssembler::offset_to_global_toc(const address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
  assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}

// Address of current method's TOC.
inline address MacroAssembler::method_toc() {
  return code()->consts()->start();
}

// Offset of given address to current method's TOC.
inline int MacroAssembler::offset_to_method_toc(address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
  assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}

inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *) a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
  return is_addis(inst1);
}
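
// Illustrative sketch (not in the original source) of the pair recognized above, with
// R29 holding the global TOC as the inv_ra_field(inst1) == 29 check implies:
//   addis dst, R29, offset_hi   // inst1: upper 16 bits of the offset from the TOC base
//   addi  dst, dst, offset_lo   // inst2: lower 16 bits; the relocation points here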

#ifdef _LP64
// Detect narrow oop constants.
inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)a;
  // The relocation points to the second instruction, the ori.
  if (!is_ori(inst2)) return false;

  // The ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  if (inv_rs_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
    inst1_addr -= BytesPerInstWord;
  }
  return false;
}
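
// Illustrative sketch (not in the original source) of the narrow-oop constant sequence:
//   lis dst, narrow_oop_hi        // inst1: upper 16 bits of the 32-bit narrow oop
//   ori dst, dst, narrow_oop_lo   // inst2: lower 16 bits; the relocation points here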
#endif

inline bool MacroAssembler::is_load_const_at(address a) {
  const int* p_inst = (int *) a;
  bool b = is_lis(*p_inst++);
  if (is_ori(*p_inst)) {
    p_inst++;
    b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
    b = b && is_oris(*p_inst++);
    b = b && is_ori(*p_inst);
  } else if (is_lis(*p_inst)) {
    p_inst++;
    b = b && is_ori(*p_inst++);
    b = b && is_ori(*p_inst);
    // TODO: could enhance reliability by adding is_insrdi
  } else return false;
  return b;
}
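
// Illustrative note (not in the original source): the first form matched above is the
// 5-instruction load_const sequence that builds a 64-bit constant 16 bits at a time,
// e.g. for 0x1122334455667788:
//   lis    d, 0x1122        // bits 63..48
//   ori    d, d, 0x3344     // bits 47..32
//   rldicr d, d, 32, 31     // sldi d, d, 32 (hence the TODO about matching sldi)
//   oris   d, d, 0x5566     // bits 31..16
//   ori    d, d, 0x7788     // bits 15..0
// The second form (lis/lis/ori/ori) builds the two halves in separate registers and
// merges them, which is why the TODO suggests also checking for insrdi.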

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  load_const(d, obj_addr);
}

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
  // Variant 1, the 1st instruction contains the destination address:
  //
  //    bcxx  DEST
  //    endgroup
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
         is_endgroup(instruction_2);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
  // Variant 2, the 2nd instruction contains the destination address:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
         is_bxx(instruction_2);
}

// Relocation for conditional branches
inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
  // Variant 3, far cond branch to the next instruction, already patched to nops:
  //
  //    nop
  //    endgroup
  //  SKIP/DEST:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_nop(instruction_1) &&
         is_endgroup(instruction_2);
}

// Convenience bc_far versions
inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }

inline address MacroAssembler::call_stub(Register function_entry) {
  mtctr(function_entry);
  bctrl();
  return pc();
}

inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
  assert_different_registers(function_entry, return_pc);
  mtlr(return_pc);
  mtctr(function_entry);
  bctr();
}

// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
  return _last_calls_return_pc;
}

// Read from the polling page, its address is already in a register.
inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
  ld(R0, offset, polling_page_address);
}
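
// Note (illustrative, not in the original source): at a safepoint the VM makes the polling
// page unreadable, so this load raises SIGSEGV and the signal handler brings the thread to
// the safepoint; otherwise the read is harmless and the value loaded into R0 is ignored.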

// Trap-instruction-based checks.

inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
  assert(TrapBasedNullChecks, "sanity");
  tdi(cmp, a/*reg a*/, 0);
}
inline void MacroAssembler::trap_zombie_not_entrant() {
  tdi(traptoUnconditional, 0/*reg 0*/, 1);
}
inline void MacroAssembler::trap_should_not_reach_here() {
  tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
}

inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
  td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
}
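
// Note (illustrative, not in the original source): these tdi/td instructions raise SIGTRAP
// when their condition holds; the signal handler inspects the trapping instruction (for
// instance the unconditional tdi with immediate 1 or 2 above) to tell the trap reasons apart.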

// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
// Either issue a trap instruction that raises SIGTRAP, or do a compare that
// branches to exception_entry.
// No support for compressed oops (base page of heap). Does not distinguish
// loads and stores.
inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg,
                                             address exception_entry) {
  if (!ImplicitNullChecks || needs_explicit_null_check(offset) || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      assert(UseSIGTRAP, "sanity");
      trap_null_check(a);
    } else {
      Label ok;
      cmpdi(CCR0, a, 0);
      bne(CCR0, ok);
      load_const_optimized(temp_reg, exception_entry);
      mtctr(temp_reg);
      bctr();
      bind(ok);
    }
  }
}

inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Register s1) {
  if (!os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  ld(d, si16, s1);
}

inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    // Attention: no null check here!
    decode_heap_oop_not_null(d);
  } else {
    ld(d, offs, s1);
  }
}

inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
  if (UseCompressedOops) {
    Register compressedOop = encode_heap_oop_not_null((tmp != noreg) ? tmp : d, d);
    stw(compressedOop, offs, s1);
  } else {
    std(d, offs, s1);
  }
}

inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    decode_heap_oop(d);
  } else {
    ld(d, offs, s1);
  }
}

inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
  Register current = (src!=noreg) ? src : d; // Compressed oop is in d if no src provided.
  if (Universe::narrow_oop_base() != NULL) {
    sub(d, current, R30);
    current = d;
  }
  if (Universe::narrow_oop_shift() != 0) {
    srdi(d, current, LogMinObjAlignmentInBytes);
    current = d;
  }
  return current; // Encoded oop is in this register.
}
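
// Illustrative note (not in the original source): with both a heap base and a shift
// configured, the code above computes
//   narrow_oop = (oop - narrow_oop_base) >> LogMinObjAlignmentInBytes
// relying on R30 holding Universe::narrow_oop_base(); when base or shift is zero the
// corresponding step is skipped, and the result may be left in 'src' rather than 'd'.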

inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
}
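
// Illustrative note (not in the original source): decoding is the inverse of the encoding
// above, oop = (narrow_oop << LogMinObjAlignmentInBytes) + narrow_oop_base (in R30), and is
// only valid for non-NULL values; decode_heap_oop() below checks for zero first so that a
// NULL narrow oop stays NULL.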

inline void MacroAssembler::decode_heap_oop(Register d) {
  Label isNull;
  if (Universe::narrow_oop_base() != NULL) {
    cmpwi(CCR0, d, 0);
    beq(CCR0, isNull);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
  bind(isNull);
}

// SIGTRAP-based range checks for arrays.
inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
  tw (traptoLessThanUnsigned,                  a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
  twi(traptoLessThanUnsigned,                  a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
  twi(traptoEqual | traptoLessThanUnsigned,    a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
  twi(traptoGreaterThanUnsigned,               a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
  tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
  twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
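
// Example (illustrative, not in the original source; Rindex/Rlength are hypothetical
// register names): a typical bounds check compares the index against the array length and
// lets the tw/twi trap fire when the index is out of range, e.g.
//   trap_range_check_ge(Rindex, Rlength);  // SIGTRAP if (unsigned)Rindex >= (unsigned)Rlength
// The SIGTRAP handler then routes the trap to the range-check exception path.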

#if defined(ABI_ELFv2)
inline address MacroAssembler::function_entry() { return pc(); }
#else
inline address MacroAssembler::function_entry() { return emit_fd(); }
#endif

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
