src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp

author:      goetz
date:        Thu, 06 Mar 2014 10:55:28 -0800
changeset:   6511 (31e80afe3fed)
parent:      6495 (67fa91961822)
child:       6512 (fd1b9f02cc91)
permissions: -rw-r--r--

8035647: PPC64: Support for elf v2 abi.
Summary: ELFv2 ABI used by the little endian PowerPC64 on Linux.
Reviewed-by: kvn
Contributed-by: asmundak@google.com
/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

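// Recognizers for a 64-bit load with a possibly large offset: either a single
//   ld    Rt, d(Ra)
// or the two-instruction sequence
//   addis Rt, Ra, d_hi
//   ld    Rt, d_lo(Rt)
// used when the displacement does not fit into a 16-bit immediate.
// get_ld_largeoffset_offset() reassembles the full offset from the immediate
// fields of the matched instruction(s).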
inline bool MacroAssembler::is_ld_largeoffset(address a) {
  const int inst1 = *(int *)a;
  const int inst2 = *(int *)(a+4);
  return (is_ld(inst1)) ||
         (is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
}

inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
  assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");

  const int inst1 = *(int *)a;
  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else {
    const int inst2 = *(int *)(a+4);
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
}

inline void MacroAssembler::round_to(Register r, int modulus) {
  assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
  addi(r, r, modulus-1);
  clrrdi(r, r, log2_long((jlong)modulus));
}
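// Example: round_to(r, 16) rounds r up to the next multiple of 16 by emitting
//   addi   r, r, 15   // add modulus-1
//   clrrdi r, r, 4    // clear the low log2(16) = 4 bits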

// Move register if destination and source registers are different.
inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
  if (rs != rd) mr(rd, rs);
}
inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) fmr(rd, rs);
}
inline void MacroAssembler::endgroup_if_needed(bool needed) {
  if (needed) {
    endgroup();
  }
}

inline void MacroAssembler::membar(int bits) {
  // TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release
  // (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||))
  if (bits & StoreLoad) sync(); else lwsync();
}
inline void MacroAssembler::release() { membar(LoadStore | StoreStore); }
inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); }
inline void MacroAssembler::fence()   { membar(LoadLoad | LoadStore | StoreLoad | StoreStore); }
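// Only a full sync orders stores before subsequent loads on PPC64, so any
// barrier that includes StoreLoad degenerates to sync; all other combinations
// are served by the cheaper lwsync. Hence release() and acquire() emit lwsync
// while fence() emits sync.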

// Address of the global TOC.
inline address MacroAssembler::global_toc() {
  return CodeCache::low_bound();
}

// Offset of given address to the global TOC.
inline int MacroAssembler::offset_to_global_toc(const address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
  assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}

// Address of current method's TOC.
inline address MacroAssembler::method_toc() {
  return code()->consts()->start();
}

// Offset of given address to current method's TOC.
inline int MacroAssembler::offset_to_method_toc(address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
  assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}
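// The global TOC is anchored at the code cache's low bound, so every address
// in the code cache is reachable from it with a non-negative offset that fits
// into 31 bits. The method TOC is the start of the current method's constant
// section and is addressed the same way.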

inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *) a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
  return is_addis(inst1);
}
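// The sequence matched above loads an address relative to the global TOC:
//   addis dst, R29, offset_hi   // R29 holds the global TOC
//   addi  dst, dst, offset_lo
// with the relocation attached to the trailing addi.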

#ifdef _LP64
// Detect narrow oop constants.
inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)a;
  // The relocation points to the second instruction, the ori.
  if (!is_ori(inst2)) return false;

  // The ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  if (inv_rs_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
    inst1_addr -= BytesPerInstWord;
  }
  return false;
}
#endif
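// A narrow (compressed) oop constant is a 32-bit immediate built as
//   lis dst, narrow_oop_hi
//   ori dst, dst, narrow_oop_lo
// with the oop relocation attached to the ori; that is the pattern
// is_set_narrow_oop() scans for.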


inline bool MacroAssembler::is_load_const_at(address a) {
  const int* p_inst = (int *) a;
  bool b = is_lis(*p_inst++);
  if (is_ori(*p_inst)) {
    p_inst++;
    b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
    b = b && is_oris(*p_inst++);
    b = b && is_ori(*p_inst);
  } else if (is_lis(*p_inst)) {
    p_inst++;
    b = b && is_ori(*p_inst++);
    b = b && is_ori(*p_inst);
    // TODO: could enhance reliability by adding is_insrdi
  } else return false;
  return b;
}
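// is_load_const_at() accepts the two shapes a 64-bit constant load can take:
//   lis, ori, rldicr(sldi 32), oris, ori   // built up in a single register
//   lis, lis, ori, ori [, insrdi]          // halves built in two registers, then merged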

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  load_const(d, obj_addr);
}

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
  // Variant 1, the 1st instruction contains the destination address:
  //
  //    bcxx  DEST
  //    endgroup
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
         is_endgroup(instruction_2);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
  // Variant 2, the 2nd instruction contains the destination address:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
         is_bxx(instruction_2);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
  // Variant 3, far cond branch to the next instruction, already patched to nops:
  //
  //    nop
  //    endgroup
  //  SKIP/DEST:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_nop(instruction_1) &&
         is_endgroup(instruction_2);
}


// Convenience bc_far versions
inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }
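// The inverted conditions reuse the same CR bit: bge_far branches when the
// 'less' bit of crx is clear (bcondCRbiIs0), ble_far when the 'greater' bit is
// clear, and bne_far when the 'equal' bit is clear, so all eight variants map
// onto just two bc_far branch conditions.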

inline address MacroAssembler::call_stub(Register function_entry) {
  mtctr(function_entry);
  bctrl();
  return pc();
}

inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
  assert_different_registers(function_entry, return_pc);
  mtlr(return_pc);
  mtctr(function_entry);
  bctr();
}
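// call_stub() makes a plain indirect call through CTR and returns the pc
// following the bctrl, i.e. the call's return address. call_stub_and_return_to()
// instead installs an explicit return pc in LR and branches via bctr, so the
// callee returns to return_pc rather than to the instruction after the branch.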

// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
  return _last_calls_return_pc;
}

// Read from the polling page whose address is already in a register.
inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
  ld(R0, offset, polling_page_address);
}

// Trap-instruction-based checks.

inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
  assert(TrapBasedNullChecks, "sanity");
  tdi(cmp, a/*reg a*/, 0);
}
inline void MacroAssembler::trap_zombie_not_entrant() {
  tdi(traptoUnconditional, 0/*reg 0*/, 1);
}
inline void MacroAssembler::trap_should_not_reach_here() {
  tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
}

inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
  td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
}
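// These checks are encoded as PPC trap instructions (tdi/td) that raise
// SIGTRAP when the condition holds: trap_null_check() traps if the register
// is 0, trap_ic_miss_check() traps if the two registers differ (unsigned
// compare), and the unconditional variants use distinct immediates (1 and 2)
// so the trapping site can later be identified as zombie-not-entrant or
// should-not-reach-here.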

// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
// Either issue a trap instruction that raises SIGTRAP, or do a compare that
// branches to exception_entry.
// No support for compressed oops (base page of heap). Does not distinguish
// loads and stores.
inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg,
                                             address exception_entry) {
  if (!ImplicitNullChecks || needs_explicit_null_check(offset) || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      assert(UseSIGTRAP, "sanity");
      trap_null_check(a);
    } else {
      Label ok;
      cmpdi(CCR0, a, 0);
      bne(CCR0, ok);
      load_const_optimized(temp_reg, exception_entry);
      mtctr(temp_reg);
      bctr();
      bind(ok);
    }
  }
}

inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Register s1) {
  if (!os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  ld(d, si16, s1);
}

inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    // Attention: no null check here!
    decode_heap_oop_not_null(d);
  } else {
    ld(d, offs, s1);
  }
}

inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    decode_heap_oop(d);
  } else {
    ld(d, offs, s1);
  }
}
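// With compressed oops a heap reference is stored as a 32-bit value and
// expanded on load: oop = narrow_oop_base + (narrow << narrow_oop_shift),
// where 0 decodes to NULL. The encode/decode helpers below implement this,
// with the narrow-oop base kept in R30.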

inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_base() != NULL) {
    sub(d, d, R30);
  }
  if (Universe::narrow_oop_shift() != 0) {
    srdi(d, d, LogMinObjAlignmentInBytes);
  }
}

inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
}

inline void MacroAssembler::decode_heap_oop(Register d) {
  Label isNull;
  if (Universe::narrow_oop_base() != NULL) {
    cmpwi(CCR0, d, 0);
    beq(CCR0, isNull);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
  bind(isNull);
}

// SIGTRAP-based range checks for arrays.
inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
  tw (traptoLessThanUnsigned,                  a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
  twi(traptoLessThanUnsigned,                  a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
  twi(traptoEqual | traptoLessThanUnsigned,    a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
  twi(traptoGreaterThanUnsigned,               a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
  tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
  twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
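// Example: a bounds check of the form "trap if index >= length" can be
// expressed as
//   trap_range_check_ge(Rindex, Rlength);  // tw traptoEqual|traptoGreaterThanUnsigned
// The comparisons are unsigned, so a negative index is caught as well.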

#if defined(ABI_ELFv2)
inline address MacroAssembler::function_entry() { return pc(); }
#else
inline address MacroAssembler::function_entry() { return emit_fd(); }
#endif
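// Under the ELFv2 ABI (used by little-endian PPC64 Linux) functions have no
// separate function descriptor, so the entry point is simply the current pc.
// Under ELFv1 a function descriptor is emitted first (emit_fd()) and its
// address serves as the entry point.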

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
