goetz@6458: /* goetz@6458: * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. goetz@6515: * Copyright 2012, 2014 SAP AG. All rights reserved. goetz@6458: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. goetz@6458: * goetz@6458: * This code is free software; you can redistribute it and/or modify it goetz@6458: * under the terms of the GNU General Public License version 2 only, as goetz@6458: * published by the Free Software Foundation. goetz@6458: * goetz@6458: * This code is distributed in the hope that it will be useful, but WITHOUT goetz@6458: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or goetz@6458: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License goetz@6458: * version 2 for more details (a copy is included in the LICENSE file that goetz@6458: * accompanied this code). goetz@6458: * goetz@6458: * You should have received a copy of the GNU General Public License version goetz@6458: * 2 along with this work; if not, write to the Free Software Foundation, goetz@6458: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. goetz@6458: * goetz@6458: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA goetz@6458: * or visit www.oracle.com if you need additional information or have any goetz@6458: * questions. goetz@6458: * goetz@6458: */ goetz@6458: goetz@6458: #include "precompiled.hpp" goetz@6458: #include "asm/macroAssembler.inline.hpp" goetz@6458: #include "compiler/disassembler.hpp" goetz@6458: #include "gc_interface/collectedHeap.inline.hpp" goetz@6458: #include "interpreter/interpreter.hpp" goetz@6458: #include "memory/cardTableModRefBS.hpp" goetz@6458: #include "memory/resourceArea.hpp" goetz@6458: #include "prims/methodHandles.hpp" goetz@6458: #include "runtime/biasedLocking.hpp" goetz@6458: #include "runtime/interfaceSupport.hpp" goetz@6458: #include "runtime/objectMonitor.hpp" goetz@6458: #include "runtime/os.hpp" goetz@6458: #include "runtime/sharedRuntime.hpp" goetz@6458: #include "runtime/stubRoutines.hpp" goetz@6458: #include "utilities/macros.hpp" goetz@6458: #if INCLUDE_ALL_GCS goetz@6458: #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" goetz@6458: #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" goetz@6458: #include "gc_implementation/g1/heapRegion.hpp" goetz@6458: #endif // INCLUDE_ALL_GCS goetz@6458: goetz@6458: #ifdef PRODUCT goetz@6458: #define BLOCK_COMMENT(str) // nothing goetz@6458: #else goetz@6458: #define BLOCK_COMMENT(str) block_comment(str) goetz@6458: #endif goetz@6458: goetz@6458: #ifdef ASSERT goetz@6458: // On RISC, there's no benefit to verifying instruction boundaries. 
goetz@6458: bool AbstractAssembler::pd_check_instruction_mark() { return false; } goetz@6458: #endif goetz@6458: goetz@6458: void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) { goetz@6458: assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range"); goetz@6458: if (Assembler::is_simm(si31, 16)) { goetz@6458: ld(d, si31, a); goetz@6458: if (emit_filler_nop) nop(); goetz@6458: } else { goetz@6458: const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31); goetz@6458: const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31); goetz@6458: addis(d, a, hi); goetz@6458: ld(d, lo, d); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) { goetz@6458: assert_different_registers(d, a); goetz@6458: ld_largeoffset_unchecked(d, si31, a, emit_filler_nop); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base, goetz@6458: size_t size_in_bytes, bool is_signed) { goetz@6458: switch (size_in_bytes) { goetz@6458: case 8: ld(dst, offs, base); break; goetz@6458: case 4: is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break; goetz@6458: case 2: is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break; goetz@6458: case 1: lbz(dst, offs, base); if (is_signed) extsb(dst, dst); break; // lba doesn't exist :( goetz@6458: default: ShouldNotReachHere(); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base, goetz@6458: size_t size_in_bytes) { goetz@6458: switch (size_in_bytes) { goetz@6458: case 8: std(dst, offs, base); break; goetz@6458: case 4: stw(dst, offs, base); break; goetz@6458: case 2: sth(dst, offs, base); break; goetz@6458: case 1: stb(dst, offs, base); break; goetz@6458: default: ShouldNotReachHere(); goetz@6458: } goetz@6458: } goetz@6458: goetz@6495: void MacroAssembler::align(int modulus, int max, int rem) { goetz@6495: int padding = (rem + modulus - (offset() % modulus)) % modulus; goetz@6495: if (padding > max) return; goetz@6495: for (int c = (padding >> 2); c > 0; --c) { nop(); } goetz@6458: } goetz@6458: goetz@6458: // Issue instructions that calculate given TOC from global TOC. goetz@6458: void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16, goetz@6458: bool add_relocation, bool emit_dummy_addr) { goetz@6458: int offset = -1; goetz@6458: if (emit_dummy_addr) { goetz@6458: offset = -128; // dummy address goetz@6458: } else if (addr != (address)(intptr_t)-1) { goetz@6458: offset = MacroAssembler::offset_to_global_toc(addr); goetz@6458: } goetz@6458: goetz@6458: if (hi16) { goetz@6458: addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset)); goetz@6458: } goetz@6458: if (lo16) { goetz@6458: if (add_relocation) { goetz@6458: // Relocate at the addi to avoid confusion with a load from the method's TOC. 
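goetz@6458:       // The sequence emitted by this function is thus:
goetz@6458:       //   addis dst, R29, largeoffset_si16_si16_hi(offset)
goetz@6458:       //   addi  dst, dst, largeoffset_si16_si16_lo(offset)
goetz@6458:       // and patch_calculate_address_from_global_toc_at() below finds the
goetz@6458:       // pair again by exactly this shape.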
goetz@6458:       relocate(internal_word_Relocation::spec(addr));
goetz@6458:     }
goetz@6458:     addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
goetz@6458:   }
goetz@6458: }
goetz@6458:
goetz@6458: int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
goetz@6458:   const int offset = MacroAssembler::offset_to_global_toc(addr);
goetz@6458:
goetz@6458:   const address inst2_addr = a;
goetz@6458:   const int inst2 = *(int *)inst2_addr;
goetz@6458:
goetz@6458:   // The relocation points to the second instruction, the addi,
goetz@6458:   // and the addi reads and writes the same register dst.
goetz@6458:   const int dst = inv_rt_field(inst2);
goetz@6458:   assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
goetz@6458:
goetz@6458:   // Now, find the preceding addis which writes to dst.
goetz@6458:   int inst1 = 0;
goetz@6458:   address inst1_addr = inst2_addr - BytesPerInstWord;
goetz@6458:   while (inst1_addr >= bound) {
goetz@6458:     inst1 = *(int *) inst1_addr;
goetz@6458:     if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
goetz@6458:       // Stop, found the addis which writes dst.
goetz@6458:       break;
goetz@6458:     }
goetz@6458:     inst1_addr -= BytesPerInstWord;
goetz@6458:   }
goetz@6458:
goetz@6458:   assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
goetz@6458:   set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
goetz@6458:   set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
goetz@6458:   return (int)((intptr_t)addr - (intptr_t)inst1_addr);
goetz@6458: }
goetz@6458:
goetz@6458: address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
goetz@6458:   const address inst2_addr = a;
goetz@6458:   const int inst2 = *(int *)inst2_addr;
goetz@6458:
goetz@6458:   // The relocation points to the second instruction, the addi,
goetz@6458:   // and the addi reads and writes the same register dst.
goetz@6458:   const int dst = inv_rt_field(inst2);
goetz@6458:   assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
goetz@6458:
goetz@6458:   // Now, find the preceding addis which writes to dst.
goetz@6458:   int inst1 = 0;
goetz@6458:   address inst1_addr = inst2_addr - BytesPerInstWord;
goetz@6458:   while (inst1_addr >= bound) {
goetz@6458:     inst1 = *(int *) inst1_addr;
goetz@6458:     if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
goetz@6458:       // Stop, found the addis which writes dst.
goetz@6458:       break;
goetz@6458:     }
goetz@6458:     inst1_addr -= BytesPerInstWord;
goetz@6458:   }
goetz@6458:
goetz@6458:   assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
goetz@6458:
goetz@6458:   int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
goetz@6458:   // -1 is a special case.
goetz@6458:   if (offset == -1) {
goetz@6458:     return (address)(intptr_t)-1;
goetz@6458:   } else {
goetz@6458:     return global_toc() + offset;
goetz@6458:   }
goetz@6458: }
goetz@6458:
goetz@6458: #ifdef _LP64
goetz@6458: // Patch compressed oops or klass constants.
goetz@6495: // Assembler sequence is
goetz@6495: // 1) compressed oops:
goetz@6495: //    lis    rx = const.hi
goetz@6495: //    ori    rx = rx | const.lo
goetz@6495: // 2) compressed klass:
goetz@6495: //    lis    rx = const.hi
goetz@6495: //    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
goetz@6495: //    ori    rx = rx | const.lo
goetz@6495: // A clrldi between the lis and the ori, if present, is simply skipped over while patching.
goetz@6458: int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) { goetz@6458: assert(UseCompressedOops, "Should only patch compressed oops"); goetz@6458: goetz@6458: const address inst2_addr = a; goetz@6458: const int inst2 = *(int *)inst2_addr; goetz@6458: goetz@6495: // The relocation points to the second instruction, the ori, goetz@6495: // and the ori reads and writes the same register dst. goetz@6495: const int dst = inv_rta_field(inst2); goetz@6501: assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst"); goetz@6458: // Now, find the preceding addis which writes to dst. goetz@6458: int inst1 = 0; goetz@6458: address inst1_addr = inst2_addr - BytesPerInstWord; goetz@6458: bool inst1_found = false; goetz@6458: while (inst1_addr >= bound) { goetz@6458: inst1 = *(int *)inst1_addr; goetz@6458: if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; } goetz@6458: inst1_addr -= BytesPerInstWord; goetz@6458: } goetz@6458: assert(inst1_found, "inst is not lis"); goetz@6458: goetz@6458: int xc = (data >> 16) & 0xffff; goetz@6458: int xd = (data >> 0) & 0xffff; goetz@6458: goetz@6495: set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo goetz@6501: set_imm((int *)inst2_addr, (xd)); // unsigned int goetz@6458: return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr); goetz@6458: } goetz@6458: goetz@6458: // Get compressed oop or klass constant. goetz@6458: narrowOop MacroAssembler::get_narrow_oop(address a, address bound) { goetz@6458: assert(UseCompressedOops, "Should only patch compressed oops"); goetz@6458: goetz@6458: const address inst2_addr = a; goetz@6458: const int inst2 = *(int *)inst2_addr; goetz@6458: goetz@6495: // The relocation points to the second instruction, the ori, goetz@6495: // and the ori reads and writes the same register dst. goetz@6495: const int dst = inv_rta_field(inst2); goetz@6501: assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst"); goetz@6458: // Now, find the preceding lis which writes to dst. goetz@6458: int inst1 = 0; goetz@6458: address inst1_addr = inst2_addr - BytesPerInstWord; goetz@6458: bool inst1_found = false; goetz@6458: goetz@6458: while (inst1_addr >= bound) { goetz@6458: inst1 = *(int *) inst1_addr; goetz@6458: if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;} goetz@6458: inst1_addr -= BytesPerInstWord; goetz@6458: } goetz@6458: assert(inst1_found, "inst is not lis"); goetz@6458: goetz@6495: uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff)); goetz@6495: uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16); goetz@6495: goetz@6458: return (int) (xl | xh); goetz@6458: } goetz@6458: #endif // _LP64 goetz@6458: goetz@6458: void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) { goetz@6458: int toc_offset = 0; goetz@6458: // Use RelocationHolder::none for the constant pool entry, otherwise goetz@6458: // we will end up with a failing NativeCall::verify(x) where x is goetz@6458: // the address of the constant pool entry. goetz@6458: // FIXME: We should insert relocation information for oops at the constant goetz@6458: // pool entries instead of inserting it at the loads; patching of a constant goetz@6458: // pool entry should be less expensive. goetz@6495: address oop_address = address_constant((address)a.value(), RelocationHolder::none); goetz@6495: // Relocate at the pc of the load. 
goetz@6495: relocate(a.rspec()); goetz@6495: toc_offset = (int)(oop_address - code()->consts()->start()); goetz@6458: ld_largeoffset_unchecked(dst, toc_offset, toc, true); goetz@6458: } goetz@6458: goetz@6458: bool MacroAssembler::is_load_const_from_method_toc_at(address a) { goetz@6458: const address inst1_addr = a; goetz@6458: const int inst1 = *(int *)inst1_addr; goetz@6458: goetz@6458: // The relocation points to the ld or the addis. goetz@6458: return (is_ld(inst1)) || goetz@6458: (is_addis(inst1) && inv_ra_field(inst1) != 0); goetz@6458: } goetz@6458: goetz@6458: int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) { goetz@6458: assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc"); goetz@6458: goetz@6458: const address inst1_addr = a; goetz@6458: const int inst1 = *(int *)inst1_addr; goetz@6458: goetz@6458: if (is_ld(inst1)) { goetz@6458: return inv_d1_field(inst1); goetz@6458: } else if (is_addis(inst1)) { goetz@6458: const int dst = inv_rt_field(inst1); goetz@6458: goetz@6458: // Now, find the succeeding ld which reads and writes to dst. goetz@6458: address inst2_addr = inst1_addr + BytesPerInstWord; goetz@6458: int inst2 = 0; goetz@6458: while (true) { goetz@6458: inst2 = *(int *) inst2_addr; goetz@6458: if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) { goetz@6458: // Stop, found the ld which reads and writes dst. goetz@6458: break; goetz@6458: } goetz@6458: inst2_addr += BytesPerInstWord; goetz@6458: } goetz@6458: return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2); goetz@6458: } goetz@6458: ShouldNotReachHere(); goetz@6458: return 0; goetz@6458: } goetz@6458: goetz@6458: // Get the constant from a `load_const' sequence. goetz@6458: long MacroAssembler::get_const(address a) { goetz@6458: assert(is_load_const_at(a), "not a load of a constant"); goetz@6458: const int *p = (const int*) a; goetz@6458: unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48); goetz@6458: if (is_ori(*(p+1))) { goetz@6458: x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32); goetz@6458: x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16); goetz@6458: x |= (((unsigned long) (get_imm(a,4) & 0xffff))); goetz@6458: } else if (is_lis(*(p+1))) { goetz@6458: x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32); goetz@6458: x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16); goetz@6458: x |= (((unsigned long) (get_imm(a,3) & 0xffff))); goetz@6458: } else { goetz@6458: ShouldNotReachHere(); goetz@6458: return (long) 0; goetz@6458: } goetz@6458: return (long) x; goetz@6458: } goetz@6458: goetz@6458: // Patch the 64 bit constant of a `load_const' sequence. This is a low goetz@6458: // level procedure. It neither flushes the instruction cache nor is it goetz@6458: // mt safe. 
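goetz@6458: //
goetz@6458: // Sketch of the `ori' form handled below (the indices match the set_imm()
goetz@6458: // calls; word 2, presumably the 32-bit shift of the sequence, is not patched):
goetz@6458: //   word 0 <- (x >> 48) & 0xffff
goetz@6458: //   word 1 <- (x >> 32) & 0xffff
goetz@6458: //   word 3 <- (x >> 16) & 0xffff
goetz@6458: //   word 4 <-  x        & 0xffff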
goetz@6458: void MacroAssembler::patch_const(address a, long x) { goetz@6458: assert(is_load_const_at(a), "not a load of a constant"); goetz@6458: int *p = (int*) a; goetz@6458: if (is_ori(*(p+1))) { goetz@6458: set_imm(0 + p, (x >> 48) & 0xffff); goetz@6458: set_imm(1 + p, (x >> 32) & 0xffff); goetz@6458: set_imm(3 + p, (x >> 16) & 0xffff); goetz@6458: set_imm(4 + p, x & 0xffff); goetz@6458: } else if (is_lis(*(p+1))) { goetz@6458: set_imm(0 + p, (x >> 48) & 0xffff); goetz@6458: set_imm(2 + p, (x >> 32) & 0xffff); goetz@6458: set_imm(1 + p, (x >> 16) & 0xffff); goetz@6458: set_imm(3 + p, x & 0xffff); goetz@6458: } else { goetz@6458: ShouldNotReachHere(); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { goetz@6458: assert(oop_recorder() != NULL, "this assembler needs a Recorder"); goetz@6458: int index = oop_recorder()->allocate_metadata_index(obj); goetz@6458: RelocationHolder rspec = metadata_Relocation::spec(index); goetz@6458: return AddressLiteral((address)obj, rspec); goetz@6458: } goetz@6458: goetz@6458: AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { goetz@6458: assert(oop_recorder() != NULL, "this assembler needs a Recorder"); goetz@6458: int index = oop_recorder()->find_index(obj); goetz@6458: RelocationHolder rspec = metadata_Relocation::spec(index); goetz@6458: return AddressLiteral((address)obj, rspec); goetz@6458: } goetz@6458: goetz@6458: AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { goetz@6458: assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); goetz@6458: int oop_index = oop_recorder()->allocate_oop_index(obj); goetz@6458: return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); goetz@6458: } goetz@6458: goetz@6458: AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { goetz@6458: assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); goetz@6458: int oop_index = oop_recorder()->find_index(obj); goetz@6458: return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); goetz@6458: } goetz@6458: goetz@6458: RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, goetz@6458: Register tmp, int offset) { goetz@6458: intptr_t value = *delayed_value_addr; goetz@6458: if (value != 0) { goetz@6458: return RegisterOrConstant(value + offset); goetz@6458: } goetz@6458: goetz@6458: // Load indirectly to solve generation ordering problem. goetz@6458: // static address, no relocation goetz@6458: int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true); goetz@6458: ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0) goetz@6458: goetz@6458: if (offset != 0) { goetz@6458: addi(tmp, tmp, offset); goetz@6458: } goetz@6458: goetz@6458: return RegisterOrConstant(tmp); goetz@6458: } goetz@6458: goetz@6458: #ifndef PRODUCT goetz@6458: void MacroAssembler::pd_print_patched_instruction(address branch) { goetz@6458: Unimplemented(); // TODO: PPC port goetz@6458: } goetz@6458: #endif // ndef PRODUCT goetz@6458: goetz@6458: // Conditional far branch for destinations encodable in 24+2 bits. goetz@6458: void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) { goetz@6458: goetz@6458: // If requested by flag optimize, relocate the bc_far as a goetz@6458: // runtime_call and prepare for optimizing it when the code gets goetz@6458: // relocated. 
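goetz@6458:   // A bc_far always occupies two instruction words (see the code_size used
goetz@6458:   // in set_dest_of_bc_far_at() below), so the site can later be repatched
goetz@6458:   // into any of the variants 1 to 3 documented there.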
goetz@6458: if (optimize == bc_far_optimize_on_relocate) { goetz@6458: relocate(relocInfo::runtime_call_type); goetz@6458: } goetz@6458: goetz@6458: // variant 2: goetz@6458: // goetz@6458: // b!cxx SKIP goetz@6458: // bxx DEST goetz@6458: // SKIP: goetz@6458: // goetz@6458: goetz@6458: const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)), goetz@6458: opposite_bcond(inv_boint_bcond(boint))); goetz@6458: goetz@6458: // We emit two branches. goetz@6458: // First, a conditional branch which jumps around the far branch. goetz@6458: const address not_taken_pc = pc() + 2 * BytesPerInstWord; goetz@6458: const address bc_pc = pc(); goetz@6458: bc(opposite_boint, biint, not_taken_pc); goetz@6458: goetz@6458: const int bc_instr = *(int*)bc_pc; goetz@6458: assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition"); goetz@6458: assert(opposite_boint == inv_bo_field(bc_instr), "postcondition"); goetz@6458: assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))), goetz@6458: opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))), goetz@6458: "postcondition"); goetz@6458: assert(biint == inv_bi_field(bc_instr), "postcondition"); goetz@6458: goetz@6458: // Second, an unconditional far branch which jumps to dest. goetz@6458: // Note: target(dest) remembers the current pc (see CodeSection::target) goetz@6458: // and returns the current pc if the label is not bound yet; when goetz@6458: // the label gets bound, the unconditional far branch will be patched. goetz@6458: const address target_pc = target(dest); goetz@6458: const address b_pc = pc(); goetz@6458: b(target_pc); goetz@6458: goetz@6458: assert(not_taken_pc == pc(), "postcondition"); goetz@6458: assert(dest.is_bound() || target_pc == b_pc, "postcondition"); goetz@6458: } goetz@6458: goetz@6458: bool MacroAssembler::is_bc_far_at(address instruction_addr) { goetz@6458: return is_bc_far_variant1_at(instruction_addr) || goetz@6458: is_bc_far_variant2_at(instruction_addr) || goetz@6458: is_bc_far_variant3_at(instruction_addr); goetz@6458: } goetz@6458: goetz@6458: address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) { goetz@6458: if (is_bc_far_variant1_at(instruction_addr)) { goetz@6458: const address instruction_1_addr = instruction_addr; goetz@6458: const int instruction_1 = *(int*)instruction_1_addr; goetz@6458: return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr); goetz@6458: } else if (is_bc_far_variant2_at(instruction_addr)) { goetz@6458: const address instruction_2_addr = instruction_addr + 4; goetz@6458: return bxx_destination(instruction_2_addr); goetz@6458: } else if (is_bc_far_variant3_at(instruction_addr)) { goetz@6458: return instruction_addr + 8; goetz@6458: } goetz@6458: // variant 4 ??? 
goetz@6458: ShouldNotReachHere(); goetz@6458: return NULL; goetz@6458: } goetz@6458: void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) { goetz@6458: goetz@6458: if (is_bc_far_variant3_at(instruction_addr)) { goetz@6458: // variant 3, far cond branch to the next instruction, already patched to nops: goetz@6458: // goetz@6458: // nop goetz@6458: // endgroup goetz@6458: // SKIP/DEST: goetz@6458: // goetz@6458: return; goetz@6458: } goetz@6458: goetz@6458: // first, extract boint and biint from the current branch goetz@6458: int boint = 0; goetz@6458: int biint = 0; goetz@6458: goetz@6458: ResourceMark rm; goetz@6458: const int code_size = 2 * BytesPerInstWord; goetz@6458: CodeBuffer buf(instruction_addr, code_size); goetz@6458: MacroAssembler masm(&buf); goetz@6458: if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) { goetz@6458: // Far branch to next instruction: Optimize it by patching nops (produce variant 3). goetz@6458: masm.nop(); goetz@6458: masm.endgroup(); goetz@6458: } else { goetz@6458: if (is_bc_far_variant1_at(instruction_addr)) { goetz@6458: // variant 1, the 1st instruction contains the destination address: goetz@6458: // goetz@6458: // bcxx DEST goetz@6458: // endgroup goetz@6458: // goetz@6458: const int instruction_1 = *(int*)(instruction_addr); goetz@6458: boint = inv_bo_field(instruction_1); goetz@6458: biint = inv_bi_field(instruction_1); goetz@6458: } else if (is_bc_far_variant2_at(instruction_addr)) { goetz@6458: // variant 2, the 2nd instruction contains the destination address: goetz@6458: // goetz@6458: // b!cxx SKIP goetz@6458: // bxx DEST goetz@6458: // SKIP: goetz@6458: // goetz@6458: const int instruction_1 = *(int*)(instruction_addr); goetz@6458: boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))), goetz@6458: opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1)))); goetz@6458: biint = inv_bi_field(instruction_1); goetz@6458: } else { goetz@6458: // variant 4??? goetz@6458: ShouldNotReachHere(); goetz@6458: } goetz@6458: goetz@6458: // second, set the new branch destination and optimize the code goetz@6458: if (dest != instruction_addr + 4 && // the bc_far is still unbound! goetz@6458: masm.is_within_range_of_bcxx(dest, instruction_addr)) { goetz@6458: // variant 1: goetz@6458: // goetz@6458: // bcxx DEST goetz@6458: // endgroup goetz@6458: // goetz@6458: masm.bc(boint, biint, dest); goetz@6458: masm.endgroup(); goetz@6458: } else { goetz@6458: // variant 2: goetz@6458: // goetz@6458: // b!cxx SKIP goetz@6458: // bxx DEST goetz@6458: // SKIP: goetz@6458: // goetz@6458: const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)), goetz@6458: opposite_bcond(inv_boint_bcond(boint))); goetz@6458: const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord; goetz@6458: masm.bc(opposite_boint, biint, not_taken_pc); goetz@6458: masm.b(dest); goetz@6458: } goetz@6458: } goetz@6495: ICache::ppc64_flush_icache_bytes(instruction_addr, code_size); goetz@6458: } goetz@6458: goetz@6458: // Emit a NOT mt-safe patchable 64 bit absolute call/jump. 
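goetz@6458: //
goetz@6458: // The site always occupies bxx64_patchable_size (seven instruction words),
goetz@6458: // so the two shapes can later be patched into each other:
goetz@6458: //   variant 2  (pc-relative): b/bl dest plus six nops of padding
goetz@6458: //              (the bl comes last, the b comes first, see below)
goetz@6458: //   variant 1b (absolute):    destination computed relative to the global
goetz@6458: //              TOC, moved to CTR, then bctr/bctrl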
goetz@6458: void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) { goetz@6458: // get current pc goetz@6458: uint64_t start_pc = (uint64_t) pc(); goetz@6458: goetz@6458: const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last goetz@6458: const address pc_of_b = (address) (start_pc + (0*BytesPerInstWord)); // b is first goetz@6458: goetz@6458: // relocate here goetz@6458: if (rt != relocInfo::none) { goetz@6458: relocate(rt); goetz@6458: } goetz@6458: goetz@6458: if ( ReoptimizeCallSequences && goetz@6458: (( link && is_within_range_of_b(dest, pc_of_bl)) || goetz@6458: (!link && is_within_range_of_b(dest, pc_of_b)))) { goetz@6458: // variant 2: goetz@6458: // Emit an optimized, pc-relative call/jump. goetz@6458: goetz@6458: if (link) { goetz@6458: // some padding goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: goetz@6458: // do the call goetz@6458: assert(pc() == pc_of_bl, "just checking"); goetz@6458: bl(dest, relocInfo::none); goetz@6458: } else { goetz@6458: // do the jump goetz@6458: assert(pc() == pc_of_b, "just checking"); goetz@6458: b(dest, relocInfo::none); goetz@6458: goetz@6458: // some padding goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: } goetz@6458: goetz@6458: // Assert that we can identify the emitted call/jump. goetz@6458: assert(is_bxx64_patchable_variant2_at((address)start_pc, link), goetz@6458: "can't identify emitted call"); goetz@6458: } else { goetz@6458: // variant 1: goetz@6511: #if defined(ABI_ELFv2) goetz@6511: nop(); goetz@6511: calculate_address_from_global_toc(R12, dest, true, true, false); goetz@6511: mtctr(R12); goetz@6511: nop(); goetz@6511: nop(); goetz@6511: #else goetz@6458: mr(R0, R11); // spill R11 -> R0. goetz@6458: goetz@6458: // Load the destination address into CTR, goetz@6458: // calculate destination relative to global toc. goetz@6458: calculate_address_from_global_toc(R11, dest, true, true, false); goetz@6458: goetz@6458: mtctr(R11); goetz@6458: mr(R11, R0); // spill R11 <- R0. goetz@6458: nop(); goetz@6511: #endif goetz@6458: goetz@6458: // do the call/jump goetz@6458: if (link) { goetz@6458: bctrl(); goetz@6458: } else{ goetz@6458: bctr(); goetz@6458: } goetz@6458: // Assert that we can identify the emitted call/jump. goetz@6458: assert(is_bxx64_patchable_variant1b_at((address)start_pc, link), goetz@6458: "can't identify emitted call"); goetz@6458: } goetz@6458: goetz@6458: // Assert that we can identify the emitted call/jump. goetz@6458: assert(is_bxx64_patchable_at((address)start_pc, link), goetz@6458: "can't identify emitted call"); goetz@6458: assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest, goetz@6458: "wrong encoding of dest address"); goetz@6458: } goetz@6458: goetz@6458: // Identify a bxx64_patchable instruction. goetz@6458: bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) { goetz@6458: return is_bxx64_patchable_variant1b_at(instruction_addr, link) goetz@6458: //|| is_bxx64_patchable_variant1_at(instruction_addr, link) goetz@6458: || is_bxx64_patchable_variant2_at(instruction_addr, link); goetz@6458: } goetz@6458: goetz@6458: // Does the call64_patchable instruction use a pc-relative encoding of goetz@6458: // the call destination? 
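goetz@6458: // (Only variant 2 encodes the destination pc-relatively in the b/bl itself;
goetz@6458: // variant 1b loads an absolute destination via the global TOC and branches
goetz@6458: // through CTR.)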
goetz@6458: bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) { goetz@6458: // variant 2 is pc-relative goetz@6458: return is_bxx64_patchable_variant2_at(instruction_addr, link); goetz@6458: } goetz@6458: goetz@6458: // Identify variant 1. goetz@6458: bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) { goetz@6458: unsigned int* instr = (unsigned int*) instruction_addr; goetz@6458: return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l] goetz@6458: && is_mtctr(instr[5]) // mtctr goetz@6458: && is_load_const_at(instruction_addr); goetz@6458: } goetz@6458: goetz@6458: // Identify variant 1b: load destination relative to global toc. goetz@6458: bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) { goetz@6458: unsigned int* instr = (unsigned int*) instruction_addr; goetz@6458: return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l] goetz@6458: && is_mtctr(instr[3]) // mtctr goetz@6458: && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr); goetz@6458: } goetz@6458: goetz@6458: // Identify variant 2. goetz@6458: bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) { goetz@6458: unsigned int* instr = (unsigned int*) instruction_addr; goetz@6458: if (link) { goetz@6458: return is_bl (instr[6]) // bl dest is last goetz@6458: && is_nop(instr[0]) // nop goetz@6458: && is_nop(instr[1]) // nop goetz@6458: && is_nop(instr[2]) // nop goetz@6458: && is_nop(instr[3]) // nop goetz@6458: && is_nop(instr[4]) // nop goetz@6458: && is_nop(instr[5]); // nop goetz@6458: } else { goetz@6458: return is_b (instr[0]) // b dest is first goetz@6458: && is_nop(instr[1]) // nop goetz@6458: && is_nop(instr[2]) // nop goetz@6458: && is_nop(instr[3]) // nop goetz@6458: && is_nop(instr[4]) // nop goetz@6458: && is_nop(instr[5]) // nop goetz@6458: && is_nop(instr[6]); // nop goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: // Set dest address of a bxx64_patchable instruction. goetz@6458: void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) { goetz@6458: ResourceMark rm; goetz@6458: int code_size = MacroAssembler::bxx64_patchable_size; goetz@6458: CodeBuffer buf(instruction_addr, code_size); goetz@6458: MacroAssembler masm(&buf); goetz@6458: masm.bxx64_patchable(dest, relocInfo::none, link); goetz@6495: ICache::ppc64_flush_icache_bytes(instruction_addr, code_size); goetz@6458: } goetz@6458: goetz@6458: // Get dest address of a bxx64_patchable instruction. goetz@6458: address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) { goetz@6458: if (is_bxx64_patchable_variant1_at(instruction_addr, link)) { goetz@6458: return (address) (unsigned long) get_const(instruction_addr); goetz@6458: } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) { goetz@6458: unsigned int* instr = (unsigned int*) instruction_addr; goetz@6458: if (link) { goetz@6458: const int instr_idx = 6; // bl is last goetz@6458: int branchoffset = branch_destination(instr[instr_idx], 0); goetz@6458: return instruction_addr + branchoffset + instr_idx*BytesPerInstWord; goetz@6458: } else { goetz@6458: const int instr_idx = 0; // b is first goetz@6458: int branchoffset = branch_destination(instr[instr_idx], 0); goetz@6458: return instruction_addr + branchoffset + instr_idx*BytesPerInstWord; goetz@6458: } goetz@6458: // Load dest relative to global toc. 
goetz@6458: } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) { goetz@6458: return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, goetz@6458: instruction_addr); goetz@6458: } else { goetz@6458: ShouldNotReachHere(); goetz@6458: return NULL; goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: // Uses ordering which corresponds to ABI: goetz@6458: // _savegpr0_14: std r14,-144(r1) goetz@6458: // _savegpr0_15: std r15,-136(r1) goetz@6458: // _savegpr0_16: std r16,-128(r1) goetz@6458: void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) { goetz@6458: std(R14, offset, dst); offset += 8; goetz@6458: std(R15, offset, dst); offset += 8; goetz@6458: std(R16, offset, dst); offset += 8; goetz@6458: std(R17, offset, dst); offset += 8; goetz@6458: std(R18, offset, dst); offset += 8; goetz@6458: std(R19, offset, dst); offset += 8; goetz@6458: std(R20, offset, dst); offset += 8; goetz@6458: std(R21, offset, dst); offset += 8; goetz@6458: std(R22, offset, dst); offset += 8; goetz@6458: std(R23, offset, dst); offset += 8; goetz@6458: std(R24, offset, dst); offset += 8; goetz@6458: std(R25, offset, dst); offset += 8; goetz@6458: std(R26, offset, dst); offset += 8; goetz@6458: std(R27, offset, dst); offset += 8; goetz@6458: std(R28, offset, dst); offset += 8; goetz@6458: std(R29, offset, dst); offset += 8; goetz@6458: std(R30, offset, dst); offset += 8; goetz@6458: std(R31, offset, dst); offset += 8; goetz@6458: goetz@6458: stfd(F14, offset, dst); offset += 8; goetz@6458: stfd(F15, offset, dst); offset += 8; goetz@6458: stfd(F16, offset, dst); offset += 8; goetz@6458: stfd(F17, offset, dst); offset += 8; goetz@6458: stfd(F18, offset, dst); offset += 8; goetz@6458: stfd(F19, offset, dst); offset += 8; goetz@6458: stfd(F20, offset, dst); offset += 8; goetz@6458: stfd(F21, offset, dst); offset += 8; goetz@6458: stfd(F22, offset, dst); offset += 8; goetz@6458: stfd(F23, offset, dst); offset += 8; goetz@6458: stfd(F24, offset, dst); offset += 8; goetz@6458: stfd(F25, offset, dst); offset += 8; goetz@6458: stfd(F26, offset, dst); offset += 8; goetz@6458: stfd(F27, offset, dst); offset += 8; goetz@6458: stfd(F28, offset, dst); offset += 8; goetz@6458: stfd(F29, offset, dst); offset += 8; goetz@6458: stfd(F30, offset, dst); offset += 8; goetz@6458: stfd(F31, offset, dst); goetz@6458: } goetz@6458: goetz@6458: // Uses ordering which corresponds to ABI: goetz@6458: // _restgpr0_14: ld r14,-144(r1) goetz@6458: // _restgpr0_15: ld r15,-136(r1) goetz@6458: // _restgpr0_16: ld r16,-128(r1) goetz@6458: void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) { goetz@6458: ld(R14, offset, src); offset += 8; goetz@6458: ld(R15, offset, src); offset += 8; goetz@6458: ld(R16, offset, src); offset += 8; goetz@6458: ld(R17, offset, src); offset += 8; goetz@6458: ld(R18, offset, src); offset += 8; goetz@6458: ld(R19, offset, src); offset += 8; goetz@6458: ld(R20, offset, src); offset += 8; goetz@6458: ld(R21, offset, src); offset += 8; goetz@6458: ld(R22, offset, src); offset += 8; goetz@6458: ld(R23, offset, src); offset += 8; goetz@6458: ld(R24, offset, src); offset += 8; goetz@6458: ld(R25, offset, src); offset += 8; goetz@6458: ld(R26, offset, src); offset += 8; goetz@6458: ld(R27, offset, src); offset += 8; goetz@6458: ld(R28, offset, src); offset += 8; goetz@6458: ld(R29, offset, src); offset += 8; goetz@6458: ld(R30, offset, src); offset += 8; goetz@6458: ld(R31, offset, src); offset += 8; goetz@6458: goetz@6458: // FP registers 
goetz@6458: lfd(F14, offset, src); offset += 8; goetz@6458: lfd(F15, offset, src); offset += 8; goetz@6458: lfd(F16, offset, src); offset += 8; goetz@6458: lfd(F17, offset, src); offset += 8; goetz@6458: lfd(F18, offset, src); offset += 8; goetz@6458: lfd(F19, offset, src); offset += 8; goetz@6458: lfd(F20, offset, src); offset += 8; goetz@6458: lfd(F21, offset, src); offset += 8; goetz@6458: lfd(F22, offset, src); offset += 8; goetz@6458: lfd(F23, offset, src); offset += 8; goetz@6458: lfd(F24, offset, src); offset += 8; goetz@6458: lfd(F25, offset, src); offset += 8; goetz@6458: lfd(F26, offset, src); offset += 8; goetz@6458: lfd(F27, offset, src); offset += 8; goetz@6458: lfd(F28, offset, src); offset += 8; goetz@6458: lfd(F29, offset, src); offset += 8; goetz@6458: lfd(F30, offset, src); offset += 8; goetz@6458: lfd(F31, offset, src); goetz@6458: } goetz@6458: goetz@6458: // For verify_oops. goetz@6458: void MacroAssembler::save_volatile_gprs(Register dst, int offset) { goetz@6458: std(R3, offset, dst); offset += 8; goetz@6458: std(R4, offset, dst); offset += 8; goetz@6458: std(R5, offset, dst); offset += 8; goetz@6458: std(R6, offset, dst); offset += 8; goetz@6458: std(R7, offset, dst); offset += 8; goetz@6458: std(R8, offset, dst); offset += 8; goetz@6458: std(R9, offset, dst); offset += 8; goetz@6458: std(R10, offset, dst); offset += 8; goetz@6458: std(R11, offset, dst); offset += 8; goetz@6458: std(R12, offset, dst); goetz@6458: } goetz@6458: goetz@6458: // For verify_oops. goetz@6458: void MacroAssembler::restore_volatile_gprs(Register src, int offset) { goetz@6458: ld(R3, offset, src); offset += 8; goetz@6458: ld(R4, offset, src); offset += 8; goetz@6458: ld(R5, offset, src); offset += 8; goetz@6458: ld(R6, offset, src); offset += 8; goetz@6458: ld(R7, offset, src); offset += 8; goetz@6458: ld(R8, offset, src); offset += 8; goetz@6458: ld(R9, offset, src); offset += 8; goetz@6458: ld(R10, offset, src); offset += 8; goetz@6458: ld(R11, offset, src); offset += 8; goetz@6458: ld(R12, offset, src); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::save_LR_CR(Register tmp) { goetz@6458: mfcr(tmp); goetz@6458: std(tmp, _abi(cr), R1_SP); goetz@6458: mflr(tmp); goetz@6458: std(tmp, _abi(lr), R1_SP); goetz@6458: // Tmp must contain lr on exit! 
(see return_addr and prolog in ppc64.ad)
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::restore_LR_CR(Register tmp) {
goetz@6458:   assert(tmp != R1_SP, "must be distinct");
goetz@6458:   ld(tmp, _abi(lr), R1_SP);
goetz@6458:   mtlr(tmp);
goetz@6458:   ld(tmp, _abi(cr), R1_SP);
goetz@6458:   mtcr(tmp);
goetz@6458: }
goetz@6458:
goetz@6458: address MacroAssembler::get_PC_trash_LR(Register result) {
goetz@6458:   Label L;
goetz@6458:   bl(L);
goetz@6458:   bind(L);
goetz@6458:   address lr_pc = pc();
goetz@6458:   mflr(result);
goetz@6458:   return lr_pc;
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::resize_frame(Register offset, Register tmp) {
goetz@6458: #ifdef ASSERT
goetz@6458:   assert_different_registers(offset, tmp, R1_SP);
goetz@6458:   andi_(tmp, offset, frame::alignment_in_bytes-1);
goetz@6458:   asm_assert_eq("resize_frame: unaligned", 0x204);
goetz@6458: #endif
goetz@6458:
goetz@6458:   // tmp <- *(SP)
goetz@6458:   ld(tmp, _abi(callers_sp), R1_SP);
goetz@6458:   // addr <- SP + offset;
goetz@6458:   // *(addr) <- tmp;
goetz@6458:   // SP <- addr
goetz@6458:   stdux(tmp, R1_SP, offset);
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::resize_frame(int offset, Register tmp) {
goetz@6458:   assert(is_simm(offset, 16), "too big an offset");
goetz@6458:   assert_different_registers(tmp, R1_SP);
goetz@6458:   assert((offset & (frame::alignment_in_bytes-1)) == 0, "resize_frame: unaligned");
goetz@6458:   // tmp <- *(SP)
goetz@6458:   ld(tmp, _abi(callers_sp), R1_SP);
goetz@6458:   // addr <- SP + offset;
goetz@6458:   // *(addr) <- tmp;
goetz@6458:   // SP <- addr
goetz@6458:   stdu(tmp, offset, R1_SP);
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
goetz@6458:   // (addr == tmp1) || (addr == tmp2) is allowed here!
goetz@6458:   assert(tmp1 != tmp2, "must be distinct");
goetz@6458:
goetz@6458:   // compute offset w.r.t. current stack pointer
goetz@6458:   // tmp1 <- addr - SP (!)
goetz@6458:   subf(tmp1, R1_SP, addr);
goetz@6458:
goetz@6458:   // atomically update SP keeping back link.
goetz@6458:   resize_frame(tmp1/* offset */, tmp2/* tmp */);
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::push_frame(Register bytes, Register tmp) {
goetz@6458: #ifdef ASSERT
goetz@6458:   assert(bytes != R0, "r0 not allowed here");
goetz@6458:   andi_(R0, bytes, frame::alignment_in_bytes-1);
goetz@6458:   asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
goetz@6458: #endif
goetz@6458:   neg(tmp, bytes);
goetz@6458:   stdux(R1_SP, R1_SP, tmp);
goetz@6458: }
goetz@6458:
goetz@6458: // Push a frame of size `bytes'.
goetz@6458: void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
goetz@6458:   long offset = align_addr(bytes, frame::alignment_in_bytes);
goetz@6458:   if (is_simm(-offset, 16)) {
goetz@6458:     stdu(R1_SP, -offset, R1_SP);
goetz@6458:   } else {
goetz@6458:     load_const(tmp, -offset);
goetz@6458:     stdux(R1_SP, R1_SP, tmp);
goetz@6458:   }
goetz@6458: }
goetz@6458:
goetz@6511: // Push a frame of size `bytes' plus abi_reg_args on top.
goetz@6511: void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
goetz@6511:   push_frame(bytes + frame::abi_reg_args_size, tmp);
goetz@6458: }
goetz@6458:
goetz@6458: // Set up a new C frame with a spill area for non-volatile GPRs and
goetz@6458: // additional space for local variables.
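goetz@6458: // The resulting frame size is
goetz@6458: //   align(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size,
goetz@6458: //         frame::alignment_in_bytes),
goetz@6458: // with the alignment applied by push_frame() above.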
goetz@6511: void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes, goetz@6511: Register tmp) { goetz@6511: push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp); goetz@6458: } goetz@6458: goetz@6458: // Pop current C frame. goetz@6458: void MacroAssembler::pop_frame() { goetz@6458: ld(R1_SP, _abi(callers_sp), R1_SP); goetz@6458: } goetz@6458: goetz@6511: #if defined(ABI_ELFv2) goetz@6511: address MacroAssembler::branch_to(Register r_function_entry, bool and_link) { goetz@6511: // TODO(asmundak): make sure the caller uses R12 as function descriptor goetz@6511: // most of the times. goetz@6511: if (R12 != r_function_entry) { goetz@6511: mr(R12, r_function_entry); goetz@6511: } goetz@6511: mtctr(R12); goetz@6511: // Do a call or a branch. goetz@6511: if (and_link) { goetz@6511: bctrl(); goetz@6511: } else { goetz@6511: bctr(); goetz@6511: } goetz@6511: _last_calls_return_pc = pc(); goetz@6511: goetz@6511: return _last_calls_return_pc; goetz@6511: } goetz@6511: goetz@6511: // Call a C function via a function descriptor and use full C goetz@6511: // calling conventions. Updates and returns _last_calls_return_pc. goetz@6511: address MacroAssembler::call_c(Register r_function_entry) { goetz@6511: return branch_to(r_function_entry, /*and_link=*/true); goetz@6511: } goetz@6511: goetz@6511: // For tail calls: only branch, don't link, so callee returns to caller of this function. goetz@6511: address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) { goetz@6511: return branch_to(r_function_entry, /*and_link=*/false); goetz@6511: } goetz@6511: goetz@6511: address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) { goetz@6511: load_const(R12, function_entry, R0); goetz@6511: return branch_to(R12, /*and_link=*/true); goetz@6511: } goetz@6511: goetz@6511: #else goetz@6458: // Generic version of a call to C function via a function descriptor goetz@6458: // with variable support for C calling conventions (TOC, ENV, etc.). goetz@6458: // Updates and returns _last_calls_return_pc. goetz@6458: address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, goetz@6458: bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) { goetz@6458: // we emit standard ptrgl glue code here goetz@6458: assert((function_descriptor != R0), "function_descriptor cannot be R0"); goetz@6458: goetz@6458: // retrieve necessary entries from the function descriptor goetz@6458: ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor); goetz@6458: mtctr(R0); goetz@6458: goetz@6458: if (load_toc_of_callee) { goetz@6458: ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor); goetz@6458: } goetz@6458: if (load_env_of_callee) { goetz@6458: ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor); goetz@6458: } else if (load_toc_of_callee) { goetz@6458: li(R11, 0); goetz@6458: } goetz@6458: goetz@6458: // do a call or a branch goetz@6458: if (and_link) { goetz@6458: bctrl(); goetz@6458: } else { goetz@6458: bctr(); goetz@6458: } goetz@6458: _last_calls_return_pc = pc(); goetz@6458: goetz@6458: return _last_calls_return_pc; goetz@6458: } goetz@6458: goetz@6458: // Call a C function via a function descriptor and use full C calling goetz@6458: // conventions. goetz@6458: // We don't use the TOC in generated code, so there is no need to save goetz@6458: // and restore its value. 
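goetz@6458: //
goetz@6458: // Reminder: in this (non-ELFv2) descriptor-based ABI a function descriptor
goetz@6458: // is the triple { entry point, TOC, environment }; branch_to() above reads
goetz@6458: // those fields through FunctionDescriptor::entry_offset()/toc_offset()/env_offset().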
goetz@6458: address MacroAssembler::call_c(Register fd) { goetz@6458: return branch_to(fd, /*and_link=*/true, goetz@6458: /*save toc=*/false, goetz@6458: /*restore toc=*/false, goetz@6458: /*load toc=*/true, goetz@6458: /*load env=*/true); goetz@6458: } goetz@6458: goetz@6495: address MacroAssembler::call_c_and_return_to_caller(Register fd) { goetz@6495: return branch_to(fd, /*and_link=*/false, goetz@6495: /*save toc=*/false, goetz@6495: /*restore toc=*/false, goetz@6495: /*load toc=*/true, goetz@6495: /*load env=*/true); goetz@6495: } goetz@6495: goetz@6458: address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) { goetz@6458: if (rt != relocInfo::none) { goetz@6458: // this call needs to be relocatable goetz@6458: if (!ReoptimizeCallSequences goetz@6458: || (rt != relocInfo::runtime_call_type && rt != relocInfo::none) goetz@6458: || fd == NULL // support code-size estimation goetz@6458: || !fd->is_friend_function() goetz@6458: || fd->entry() == NULL) { goetz@6458: // it's not a friend function as defined by class FunctionDescriptor, goetz@6458: // so do a full call-c here. goetz@6458: load_const(R11, (address)fd, R0); goetz@6458: goetz@6458: bool has_env = (fd != NULL && fd->env() != NULL); goetz@6458: return branch_to(R11, /*and_link=*/true, goetz@6501: /*save toc=*/false, goetz@6501: /*restore toc=*/false, goetz@6501: /*load toc=*/true, goetz@6501: /*load env=*/has_env); goetz@6458: } else { goetz@6458: // It's a friend function. Load the entry point and don't care about goetz@6458: // toc and env. Use an optimizable call instruction, but ensure the goetz@6458: // same code-size as in the case of a non-friend function. goetz@6458: nop(); goetz@6458: nop(); goetz@6458: nop(); goetz@6458: bl64_patchable(fd->entry(), rt); goetz@6458: _last_calls_return_pc = pc(); goetz@6458: return _last_calls_return_pc; goetz@6458: } goetz@6458: } else { goetz@6458: // This call does not need to be relocatable, do more aggressive goetz@6458: // optimizations. goetz@6458: if (!ReoptimizeCallSequences goetz@6458: || !fd->is_friend_function()) { goetz@6458: // It's not a friend function as defined by class FunctionDescriptor, goetz@6458: // so do a full call-c here. goetz@6458: load_const(R11, (address)fd, R0); goetz@6458: return branch_to(R11, /*and_link=*/true, goetz@6501: /*save toc=*/false, goetz@6501: /*restore toc=*/false, goetz@6501: /*load toc=*/true, goetz@6501: /*load env=*/true); goetz@6458: } else { goetz@6458: // it's a friend function, load the entry point and don't care about goetz@6458: // toc and env. goetz@6458: address dest = fd->entry(); goetz@6458: if (is_within_range_of_b(dest, pc())) { goetz@6458: bl(dest); goetz@6458: } else { goetz@6458: bl64_patchable(dest, rt); goetz@6458: } goetz@6458: _last_calls_return_pc = pc(); goetz@6458: return _last_calls_return_pc; goetz@6458: } goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: // Call a C function. All constants needed reside in TOC. goetz@6458: // goetz@6458: // Read the address to call from the TOC. goetz@6458: // Read env from TOC, if fd specifies an env. goetz@6458: // Read new TOC from TOC. 
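goetz@6458: //
goetz@6458: // Sketch of the non-friend path below:
goetz@6458: //   load_const_from_method_toc(R11, fd->entry(), toc);  mtctr(R11);
goetz@6458: //   load env into R11 (or li(R11, 0); nop(); if fd->env() == NULL);
goetz@6458: //   load_toc_from_toc(R2_TOC, fd->toc(), toc);  // R2_TOC is killed.
goetz@6458: //   bctrl();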
goetz@6458: address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd, goetz@6458: relocInfo::relocType rt, Register toc) { goetz@6458: if (!ReoptimizeCallSequences goetz@6458: || (rt != relocInfo::runtime_call_type && rt != relocInfo::none) goetz@6458: || !fd->is_friend_function()) { goetz@6458: // It's not a friend function as defined by class FunctionDescriptor, goetz@6458: // so do a full call-c here. goetz@6458: assert(fd->entry() != NULL, "function must be linked"); goetz@6458: goetz@6458: AddressLiteral fd_entry(fd->entry()); goetz@6458: load_const_from_method_toc(R11, fd_entry, toc); goetz@6458: mtctr(R11); goetz@6458: if (fd->env() == NULL) { goetz@6458: li(R11, 0); goetz@6458: nop(); goetz@6458: } else { goetz@6458: AddressLiteral fd_env(fd->env()); goetz@6458: load_const_from_method_toc(R11, fd_env, toc); goetz@6458: } goetz@6458: AddressLiteral fd_toc(fd->toc()); goetz@6458: load_toc_from_toc(R2_TOC, fd_toc, toc); goetz@6458: // R2_TOC is killed. goetz@6458: bctrl(); goetz@6458: _last_calls_return_pc = pc(); goetz@6458: } else { goetz@6458: // It's a friend function, load the entry point and don't care about goetz@6458: // toc and env. Use an optimizable call instruction, but ensure the goetz@6458: // same code-size as in the case of a non-friend function. goetz@6458: nop(); goetz@6458: bl64_patchable(fd->entry(), rt); goetz@6458: _last_calls_return_pc = pc(); goetz@6458: } goetz@6458: return _last_calls_return_pc; goetz@6458: } goetz@6515: #endif // ABI_ELFv2 goetz@6458: goetz@6458: void MacroAssembler::call_VM_base(Register oop_result, goetz@6458: Register last_java_sp, goetz@6458: address entry_point, goetz@6458: bool check_exceptions) { goetz@6458: BLOCK_COMMENT("call_VM {"); goetz@6458: // Determine last_java_sp register. goetz@6458: if (!last_java_sp->is_valid()) { goetz@6458: last_java_sp = R1_SP; goetz@6458: } goetz@6458: set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1); goetz@6458: goetz@6458: // ARG1 must hold thread address. goetz@6458: mr(R3_ARG1, R16_thread); goetz@6511: #if defined(ABI_ELFv2) goetz@6511: address return_pc = call_c(entry_point, relocInfo::none); goetz@6511: #else goetz@6458: address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none); goetz@6511: #endif goetz@6458: goetz@6458: reset_last_Java_frame(); goetz@6458: goetz@6458: // Check for pending exceptions. goetz@6458: if (check_exceptions) { goetz@6458: // We don't check for exceptions here. goetz@6458: ShouldNotReachHere(); goetz@6458: } goetz@6458: goetz@6458: // Get oop result if there is one and reset the value in the thread. 
goetz@6458: if (oop_result->is_valid()) { goetz@6458: get_vm_result(oop_result); goetz@6458: } goetz@6458: goetz@6458: _last_calls_return_pc = return_pc; goetz@6458: BLOCK_COMMENT("} call_VM"); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM_leaf_base(address entry_point) { goetz@6458: BLOCK_COMMENT("call_VM_leaf {"); goetz@6511: #if defined(ABI_ELFv2) goetz@6511: call_c(entry_point, relocInfo::none); goetz@6511: #else goetz@6458: call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none); goetz@6511: #endif goetz@6458: BLOCK_COMMENT("} call_VM_leaf"); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { goetz@6458: call_VM_base(oop_result, noreg, entry_point, check_exceptions); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, goetz@6458: bool check_exceptions) { goetz@6458: // R3_ARG1 is reserved for the thread. goetz@6458: mr_if_needed(R4_ARG2, arg_1); goetz@6458: call_VM(oop_result, entry_point, check_exceptions); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, goetz@6458: bool check_exceptions) { goetz@6458: // R3_ARG1 is reserved for the thread goetz@6458: mr_if_needed(R4_ARG2, arg_1); goetz@6458: assert(arg_2 != R4_ARG2, "smashed argument"); goetz@6458: mr_if_needed(R5_ARG3, arg_2); goetz@6458: call_VM(oop_result, entry_point, check_exceptions); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM_leaf(address entry_point) { goetz@6458: call_VM_leaf_base(entry_point); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { goetz@6458: mr_if_needed(R3_ARG1, arg_1); goetz@6458: call_VM_leaf(entry_point); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { goetz@6458: mr_if_needed(R3_ARG1, arg_1); goetz@6458: assert(arg_2 != R3_ARG1, "smashed argument"); goetz@6458: mr_if_needed(R4_ARG2, arg_2); goetz@6458: call_VM_leaf(entry_point); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { goetz@6458: mr_if_needed(R3_ARG1, arg_1); goetz@6458: assert(arg_2 != R3_ARG1, "smashed argument"); goetz@6458: mr_if_needed(R4_ARG2, arg_2); goetz@6458: assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument"); goetz@6458: mr_if_needed(R5_ARG3, arg_3); goetz@6458: call_VM_leaf(entry_point); goetz@6458: } goetz@6458: goetz@6458: // Check whether instruction is a read access to the polling page goetz@6458: // which was emitted by load_from_polling_page(..). goetz@6458: bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext, goetz@6458: address* polling_address_ptr) { goetz@6458: if (!is_ld(instruction)) goetz@6458: return false; // It's not a ld. Fail. goetz@6458: goetz@6458: int rt = inv_rt_field(instruction); goetz@6458: int ra = inv_ra_field(instruction); goetz@6458: int ds = inv_ds_field(instruction); goetz@6458: if (!(ds == 0 && ra != 0 && rt == 0)) { goetz@6458: return false; // It's not a ld(r0, X, ra). Fail. goetz@6458: } goetz@6458: goetz@6458: if (!ucontext) { goetz@6458: // Set polling address. goetz@6458: if (polling_address_ptr != NULL) { goetz@6458: *polling_address_ptr = NULL; goetz@6458: } goetz@6458: return true; // No ucontext given. 
Can't check value of ra. Assume true.
goetz@6458:   }
goetz@6458:
goetz@6458: #ifdef LINUX
goetz@6458:   // Ucontext given. Check that register ra contains the address of
goetz@6458:   // the safepoint polling page.
goetz@6458:   ucontext_t* uc = (ucontext_t*) ucontext;
goetz@6458:   // Set polling address.
goetz@6458:   address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
goetz@6458:   if (polling_address_ptr != NULL) {
goetz@6458:     *polling_address_ptr = addr;
goetz@6458:   }
goetz@6458:   return os::is_poll_address(addr);
goetz@6458: #else
goetz@6458:   // Not on Linux, ucontext must be NULL.
goetz@6458:   ShouldNotReachHere();
goetz@6458:   return false;
goetz@6458: #endif
goetz@6458: }
goetz@6458:
goetz@6458: bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
goetz@6458: #ifdef LINUX
goetz@6458:   ucontext_t* uc = (ucontext_t*) ucontext;
goetz@6458:
goetz@6458:   if (is_stwx(instruction) || is_stwux(instruction)) {
goetz@6458:     int ra = inv_ra_field(instruction);
goetz@6458:     int rb = inv_rb_field(instruction);
goetz@6458:
goetz@6458:     // look up content of ra and rb in ucontext
goetz@6458:     address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
goetz@6458:     long    rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
goetz@6458:     return os::is_memory_serialize_page(thread, ra_val+rb_val);
goetz@6458:   } else if (is_stw(instruction) || is_stwu(instruction)) {
goetz@6458:     int ra = inv_ra_field(instruction);
goetz@6458:     int d1 = inv_d1_field(instruction);
goetz@6458:
goetz@6458:     // look up content of ra in ucontext
goetz@6458:     address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
goetz@6458:     return os::is_memory_serialize_page(thread, ra_val+d1);
goetz@6458:   } else {
goetz@6458:     return false;
goetz@6458:   }
goetz@6458: #else
goetz@6458:   // workaround not needed on !LINUX :-)
goetz@6458:   ShouldNotCallThis();
goetz@6458:   return false;
goetz@6458: #endif
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::bang_stack_with_offset(int offset) {
goetz@6458:   // When increasing the stack, the old stack pointer will be written
goetz@6458:   // to the new top of stack according to the PPC64 ABI.
goetz@6458:   // Therefore, stack banging is not necessary when increasing
goetz@6458:   // the stack by <= os::vm_page_size() bytes.
goetz@6458:   // When increasing the stack by a larger amount, this method is
goetz@6458:   // called repeatedly to bang the intermediate pages.
goetz@6458:
goetz@6458:   // Stack grows down, caller passes positive offset.
goetz@6458:   assert(offset > 0, "must bang with positive offset");
goetz@6458:
goetz@6458:   long stdoffset = -offset;
goetz@6458:
goetz@6458:   if (is_simm(stdoffset, 16)) {
goetz@6458:     // Signed 16 bit offset, a simple std is ok.
goetz@6458: if (UseLoadInstructionsForStackBangingPPC64) { goetz@6458: ld(R0, (int)(signed short)stdoffset, R1_SP); goetz@6458: } else { goetz@6458: std(R0,(int)(signed short)stdoffset, R1_SP); goetz@6458: } goetz@6458: } else if (is_simm(stdoffset, 31)) { goetz@6458: const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset); goetz@6458: const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset); goetz@6458: goetz@6458: Register tmp = R11; goetz@6458: addis(tmp, R1_SP, hi); goetz@6458: if (UseLoadInstructionsForStackBangingPPC64) { goetz@6458: ld(R0, lo, tmp); goetz@6458: } else { goetz@6458: std(R0, lo, tmp); goetz@6458: } goetz@6458: } else { goetz@6458: ShouldNotReachHere(); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: // If instruction is a stack bang of the form goetz@6458: // std R0, x(Ry), (see bang_stack_with_offset()) goetz@6458: // stdu R1_SP, x(R1_SP), (see push_frame(), resize_frame()) goetz@6458: // or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame()) goetz@6458: // return the banged address. Otherwise, return 0. goetz@6458: address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) { goetz@6458: #ifdef LINUX goetz@6458: ucontext_t* uc = (ucontext_t*) ucontext; goetz@6458: int rs = inv_rs_field(instruction); goetz@6458: int ra = inv_ra_field(instruction); goetz@6458: if ( (is_ld(instruction) && rs == 0 && UseLoadInstructionsForStackBangingPPC64) goetz@6458: || (is_std(instruction) && rs == 0 && !UseLoadInstructionsForStackBangingPPC64) goetz@6458: || (is_stdu(instruction) && rs == 1)) { goetz@6458: int ds = inv_ds_field(instruction); goetz@6458: // return banged address goetz@6458: return ds+(address)uc->uc_mcontext.regs->gpr[ra]; goetz@6458: } else if (is_stdux(instruction) && rs == 1) { goetz@6458: int rb = inv_rb_field(instruction); goetz@6458: address sp = (address)uc->uc_mcontext.regs->gpr[1]; goetz@6458: long rb_val = (long)uc->uc_mcontext.regs->gpr[rb]; goetz@6458: return ra != 1 || rb_val >= 0 ? NULL // not a stack bang goetz@6458: : sp + rb_val; // banged address goetz@6458: } goetz@6458: return NULL; // not a stack bang goetz@6458: #else goetz@6458: // workaround not needed on !LINUX :-) goetz@6458: ShouldNotCallThis(); goetz@6458: return NULL; goetz@6458: #endif goetz@6458: } goetz@6458: goetz@6458: // CmpxchgX sets condition register to cmpX(current, compare). goetz@6458: void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value, goetz@6458: Register compare_value, Register exchange_value, goetz@6458: Register addr_base, int semantics, bool cmpxchgx_hint, goetz@6458: Register int_flag_success, bool contention_hint) { goetz@6458: Label retry; goetz@6458: Label failed; goetz@6458: Label done; goetz@6458: goetz@6458: // Save one branch if result is returned via register and goetz@6458: // result register is different from the other ones. goetz@6458: bool use_result_reg = (int_flag_success != noreg); goetz@6458: bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value && goetz@6458: int_flag_success != exchange_value && int_flag_success != addr_base); goetz@6458: goetz@6458: // release/fence semantics goetz@6458: if (semantics & MemBarRel) { goetz@6458: release(); goetz@6458: } goetz@6458: goetz@6458: if (use_result_reg && preset_result_reg) { goetz@6458: li(int_flag_success, 0); // preset (assume cas failed) goetz@6458: } goetz@6458: goetz@6458: // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM). 
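goetz@6458:   // The guard is a plain lwz/cmpw executed before the lwarx of the retry
goetz@6458:   // loop, so no reservation is acquired when the compare would fail anyway.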
goetz@6458: if (contention_hint) { // Don't try to reserve if cmp fails.
goetz@6458: lwz(dest_current_value, 0, addr_base);
goetz@6458: cmpw(flag, dest_current_value, compare_value);
goetz@6458: bne(flag, failed);
goetz@6458: }
goetz@6458:
goetz@6458: // atomic emulation loop
goetz@6458: bind(retry);
goetz@6458:
goetz@6458: lwarx(dest_current_value, addr_base, cmpxchgx_hint);
goetz@6458: cmpw(flag, dest_current_value, compare_value);
goetz@6458: if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458: bne_predict_not_taken(flag, failed);
goetz@6458: } else {
goetz@6458: bne( flag, failed);
goetz@6458: }
goetz@6458: // branch to done => (flag == ne), (dest_current_value != compare_value)
goetz@6458: // fall through => (flag == eq), (dest_current_value == compare_value)
goetz@6458:
goetz@6458: stwcx_(exchange_value, addr_base);
goetz@6458: if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458: bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
goetz@6458: } else {
goetz@6458: bne( CCR0, retry); // StXcx_ sets CCR0.
goetz@6458: }
goetz@6458: // fall through => (flag == eq), (dest_current_value == compare_value), (swapped)
goetz@6458:
goetz@6458: // Result in register (must do this at the end because int_flag_success can be the
goetz@6458: // same register as one above).
goetz@6458: if (use_result_reg) {
goetz@6458: li(int_flag_success, 1);
goetz@6458: }
goetz@6458:
goetz@6458: if (semantics & MemBarFenceAfter) {
goetz@6458: fence();
goetz@6458: } else if (semantics & MemBarAcq) {
goetz@6458: isync();
goetz@6458: }
goetz@6458:
goetz@6458: if (use_result_reg && !preset_result_reg) {
goetz@6458: b(done);
goetz@6458: }
goetz@6458:
goetz@6458: bind(failed);
goetz@6458: if (use_result_reg && !preset_result_reg) {
goetz@6458: li(int_flag_success, 0);
goetz@6458: }
goetz@6458:
goetz@6458: bind(done);
goetz@6458: // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6458: // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6458: }
goetz@6458:
goetz@6458: // Performs atomic compare exchange:
goetz@6458: // if (compare_value == *addr_base)
goetz@6458: // *addr_base = exchange_value
goetz@6458: // int_flag_success = 1;
goetz@6458: // else
goetz@6458: // int_flag_success = 0;
goetz@6458: //
goetz@6458: // ConditionRegister flag = cmp(compare_value, *addr_base)
goetz@6458: // Register dest_current_value = *addr_base
goetz@6458: // Register compare_value Used to compare with value in memory
goetz@6458: // Register exchange_value Written to memory if compare_value == *addr_base
goetz@6458: // Register addr_base The memory location to compareXChange
goetz@6458: // Register int_flag_success Set to 1 if exchange_value was written to *addr_base
goetz@6458: //
goetz@6458: // To avoid the costly compare exchange, the value is tested beforehand.
goetz@6458: // Several special cases exist to avoid generating unnecessary instructions.
goetz@6458: //
goetz@6458: void MacroAssembler::cmpxchgd(ConditionRegister flag,
goetz@6458: Register dest_current_value, Register compare_value, Register exchange_value,
goetz@6458: Register addr_base, int semantics, bool cmpxchgx_hint,
goetz@6458: Register int_flag_success, Label* failed_ext, bool contention_hint) {
goetz@6458: Label retry;
goetz@6458: Label failed_int;
goetz@6458: Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
goetz@6458: Label done;
goetz@6458:
goetz@6458: // Save one branch if result is returned via register and result register is different from the other ones.
goetz@6458: bool use_result_reg = (int_flag_success!=noreg);
goetz@6458: bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value &&
goetz@6458: int_flag_success!=exchange_value && int_flag_success!=addr_base);
goetz@6458: assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
goetz@6458:
goetz@6458: // release/fence semantics
goetz@6458: if (semantics & MemBarRel) {
goetz@6458: release();
goetz@6458: }
goetz@6458:
goetz@6458: if (use_result_reg && preset_result_reg) {
goetz@6458: li(int_flag_success, 0); // preset (assume cas failed)
goetz@6458: }
goetz@6458:
goetz@6458: // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
goetz@6458: if (contention_hint) { // Don't try to reserve if cmp fails.
goetz@6458: ld(dest_current_value, 0, addr_base);
goetz@6458: cmpd(flag, dest_current_value, compare_value);
goetz@6458: bne(flag, failed);
goetz@6458: }
goetz@6458:
goetz@6458: // atomic emulation loop
goetz@6458: bind(retry);
goetz@6458:
goetz@6458: ldarx(dest_current_value, addr_base, cmpxchgx_hint);
goetz@6458: cmpd(flag, dest_current_value, compare_value);
goetz@6458: if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458: bne_predict_not_taken(flag, failed);
goetz@6458: } else {
goetz@6458: bne( flag, failed);
goetz@6458: }
goetz@6458:
goetz@6458: stdcx_(exchange_value, addr_base);
goetz@6458: if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458: bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
goetz@6458: } else {
goetz@6458: bne( CCR0, retry); // stXcx_ sets CCR0
goetz@6458: }
goetz@6458:
goetz@6458: // result in register (must do this at the end because int_flag_success can be the same register as one above)
goetz@6458: if (use_result_reg) {
goetz@6458: li(int_flag_success, 1);
goetz@6458: }
goetz@6458:
goetz@6458: // POWER6 doesn't need isync in CAS.
goetz@6458: // Always emit isync to be on the safe side.
goetz@6458: if (semantics & MemBarFenceAfter) {
goetz@6458: fence();
goetz@6458: } else if (semantics & MemBarAcq) {
goetz@6458: isync();
goetz@6458: }
goetz@6458:
goetz@6458: if (use_result_reg && !preset_result_reg) {
goetz@6458: b(done);
goetz@6458: }
goetz@6458:
goetz@6458: bind(failed_int);
goetz@6458: if (use_result_reg && !preset_result_reg) {
goetz@6458: li(int_flag_success, 0);
goetz@6458: }
goetz@6458:
goetz@6458: bind(done);
goetz@6458: // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6458: // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6458: }
goetz@6458:
goetz@6458: // Look up the method for a megamorphic invokeinterface call.
goetz@6458: // The target method is determined by <intf_klass, itable_index>.
goetz@6458: // The receiver klass is in recv_klass.
goetz@6458: // On success, the result will be in method_result, and execution falls through.
goetz@6458: // On failure, execution transfers to the given label.
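goetz@6458: //
goetz@6458: // Itable layout sketch (simplified, assumed from the offsets used below):
goetz@6458: //   recv_klass: ... vtable (vtable_length entries) ...
goetz@6458: //               itableOffsetEntry[0] { interface klass, offset }
goetz@6458: //               ...                   (list ends with a NULL interface)
goetz@6458: //   and, at recv_klass + offset, a table of itableMethodEntry, one Method*
goetz@6458: //   slot per interface method, indexed by itable_index.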
goetz@6458: void MacroAssembler::lookup_interface_method(Register recv_klass,
goetz@6458: Register intf_klass,
goetz@6458: RegisterOrConstant itable_index,
goetz@6458: Register method_result,
goetz@6458: Register scan_temp,
goetz@6458: Register sethi_temp,
goetz@6458: Label& L_no_such_interface) {
goetz@6458: assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
goetz@6458: assert(itable_index.is_constant() || itable_index.as_register() == method_result,
goetz@6458: "caller must use same register for non-constant itable index as for method");
goetz@6458:
goetz@6458: // Compute start of first itableOffsetEntry (which is at the end of the vtable).
goetz@6458: int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
goetz@6458: int itentry_off = itableMethodEntry::method_offset_in_bytes();
goetz@6458: int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
goetz@6458: int scan_step = itableOffsetEntry::size() * wordSize;
goetz@6458: int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
goetz@6458:
goetz@6458: lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
goetz@6458: // %%% We should store the aligned, prescaled offset in the klassoop.
goetz@6458: // Then the next several instructions would fold away.
goetz@6458:
goetz@6458: sldi(scan_temp, scan_temp, log_vte_size);
goetz@6458: addi(scan_temp, scan_temp, vtable_base);
goetz@6458: add(scan_temp, recv_klass, scan_temp);
goetz@6458:
goetz@6458: // Adjust recv_klass by scaled itable_index, so we can free itable_index.
goetz@6458: if (itable_index.is_register()) {
goetz@6458: Register itable_offset = itable_index.as_register();
goetz@6458: sldi(itable_offset, itable_offset, logMEsize);
goetz@6458: if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
goetz@6458: add(recv_klass, itable_offset, recv_klass);
goetz@6458: } else {
goetz@6458: long itable_offset = (long)itable_index.as_constant();
goetz@6458: load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
goetz@6458: add(recv_klass, sethi_temp, recv_klass);
goetz@6458: }
goetz@6458:
goetz@6458: // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
goetz@6458: // if (scan->interface() == intf) {
goetz@6458: // result = (klass + scan->offset() + itable_index);
goetz@6458: // }
goetz@6458: // }
goetz@6458: Label search, found_method;
goetz@6458:
goetz@6458: for (int peel = 1; peel >= 0; peel--) {
goetz@6458: // %%%% Could load both offset and interface in one ldx, if they were
goetz@6458: // in the opposite order. This would save a load.
goetz@6458: ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
goetz@6458:
goetz@6458: // Check that this entry is non-null. A null entry means that
goetz@6458: // the receiver class doesn't implement the interface, and wasn't the
goetz@6458: // same as when the caller was compiled.
goetz@6458: cmpd(CCR0, method_result, intf_klass);
goetz@6458:
goetz@6458: if (peel) {
goetz@6458: beq(CCR0, found_method);
goetz@6458: } else {
goetz@6458: bne(CCR0, search);
goetz@6458: // (invert the test to fall through to found_method...)
goetz@6458: }
goetz@6458:
goetz@6458: if (!peel) break;
goetz@6458:
goetz@6458: bind(search);
goetz@6458:
goetz@6458: cmpdi(CCR0, method_result, 0);
goetz@6458: beq(CCR0, L_no_such_interface);
goetz@6458: addi(scan_temp, scan_temp, scan_step);
goetz@6458: }
goetz@6458:
goetz@6458: bind(found_method);
goetz@6458:
goetz@6458: // Got a hit.
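goetz@6458: // scan_temp points at the matching itableOffsetEntry. Load its offset
goetz@6458: // field, then the Method* at recv_klass + offset (the scaled itable_index
goetz@6458: // was already folded into recv_klass above).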
goetz@6458: int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); goetz@6458: lwz(scan_temp, ito_offset, scan_temp); goetz@6458: ldx(method_result, scan_temp, recv_klass); goetz@6458: } goetz@6458: goetz@6458: // virtual method calling goetz@6458: void MacroAssembler::lookup_virtual_method(Register recv_klass, goetz@6458: RegisterOrConstant vtable_index, goetz@6458: Register method_result) { goetz@6458: goetz@6458: assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); goetz@6458: goetz@6458: const int base = InstanceKlass::vtable_start_offset() * wordSize; goetz@6458: assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); goetz@6458: goetz@6458: if (vtable_index.is_register()) { goetz@6458: sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord); goetz@6458: add(recv_klass, vtable_index.as_register(), recv_klass); goetz@6458: } else { goetz@6458: addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord); goetz@6458: } goetz@6458: ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass); goetz@6458: } goetz@6458: goetz@6458: /////////////////////////////////////////// subtype checking //////////////////////////////////////////// goetz@6458: goetz@6458: void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, goetz@6458: Register super_klass, goetz@6458: Register temp1_reg, goetz@6458: Register temp2_reg, goetz@6458: Label& L_success, goetz@6458: Label& L_failure) { goetz@6458: goetz@6458: const Register check_cache_offset = temp1_reg; goetz@6458: const Register cached_super = temp2_reg; goetz@6458: goetz@6458: assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super); goetz@6458: goetz@6458: int sco_offset = in_bytes(Klass::super_check_offset_offset()); goetz@6458: int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); goetz@6458: goetz@6458: // If the pointers are equal, we are done (e.g., String[] elements). goetz@6458: // This self-check enables sharing of secondary supertype arrays among goetz@6458: // non-primary types such as array-of-interface. Otherwise, each such goetz@6458: // type would need its own customized SSA. goetz@6458: // We move this check to the front of the fast path because many goetz@6458: // type checks are in fact trivially successful in this manner, goetz@6458: // so we get a nicely predicted branch right at the start of the check. goetz@6458: cmpd(CCR0, sub_klass, super_klass); goetz@6458: beq(CCR0, L_success); goetz@6458: goetz@6458: // Check the supertype display: goetz@6458: lwz(check_cache_offset, sco_offset, super_klass); goetz@6458: // The loaded value is the offset from KlassOopDesc. goetz@6458: goetz@6458: ldx(cached_super, check_cache_offset, sub_klass); goetz@6458: cmpd(CCR0, cached_super, super_klass); goetz@6458: beq(CCR0, L_success); goetz@6458: goetz@6458: // This check has worked decisively for primary supers. goetz@6458: // Secondary supers are sought in the super_cache ('super_cache_addr'). goetz@6458: // (Secondary supers are interfaces and very deeply nested subtypes.) goetz@6458: // This works in the same check above because of a tricky aliasing goetz@6458: // between the super_cache and the primary super display elements. goetz@6458: // (The 'super_check_addr' can address either, as the case requires.) goetz@6458: // Note that the cache is updated below if it does not help us find goetz@6458: // what we need immediately. 
goetz@6458: // So if it was a primary super, we can just fail immediately.
goetz@6458: // Otherwise, it's the slow path for us (no success at this point).
goetz@6458:
goetz@6458: cmpwi(CCR0, check_cache_offset, sc_offset);
goetz@6458: bne(CCR0, L_failure);
goetz@6458: // bind(slow_path); // fallthru
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
goetz@6458: Register super_klass,
goetz@6458: Register temp1_reg,
goetz@6458: Register temp2_reg,
goetz@6458: Label* L_success,
goetz@6458: Register result_reg) {
goetz@6458: const Register array_ptr = temp1_reg; // current value from cache array
goetz@6458: const Register temp = temp2_reg;
goetz@6458:
goetz@6458: assert_different_registers(sub_klass, super_klass, array_ptr, temp);
goetz@6458:
goetz@6458: int source_offset = in_bytes(Klass::secondary_supers_offset());
goetz@6458: int target_offset = in_bytes(Klass::secondary_super_cache_offset());
goetz@6458:
goetz@6458: int length_offset = Array<Klass*>::length_offset_in_bytes();
goetz@6458: int base_offset = Array<Klass*>::base_offset_in_bytes();
goetz@6458:
goetz@6458: Label hit, loop, failure, fallthru;
goetz@6458:
goetz@6458: ld(array_ptr, source_offset, sub_klass);
goetz@6458:
goetz@6458: //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
goetz@6458: lwz(temp, length_offset, array_ptr);
goetz@6458: cmpwi(CCR0, temp, 0);
goetz@6458: beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
goetz@6458:
goetz@6458: mtctr(temp); // load ctr
goetz@6458:
goetz@6458: bind(loop);
goetz@6458: // Oops in table are NO MORE compressed.
goetz@6458: ld(temp, base_offset, array_ptr);
goetz@6458: cmpd(CCR0, temp, super_klass);
goetz@6458: beq(CCR0, hit);
goetz@6458: addi(array_ptr, array_ptr, BytesPerWord);
goetz@6458: bdnz(loop);
goetz@6458:
goetz@6458: bind(failure);
goetz@6458: if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
goetz@6458: b(fallthru);
goetz@6458:
goetz@6458: bind(hit);
goetz@6458: std(super_klass, target_offset, sub_klass); // save result to cache
goetz@6458: if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
goetz@6458: if (L_success != NULL) b(*L_success);
goetz@6458:
goetz@6458: bind(fallthru);
goetz@6458: }
goetz@6458:
goetz@6458: // Try fast path, then go to slow one if not successful
goetz@6458: void MacroAssembler::check_klass_subtype(Register sub_klass,
goetz@6458: Register super_klass,
goetz@6458: Register temp1_reg,
goetz@6458: Register temp2_reg,
goetz@6458: Label& L_success) {
goetz@6458: Label L_failure;
goetz@6458: check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
goetz@6458: check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
goetz@6458: bind(L_failure); // Fallthru if not successful.
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
goetz@6458: Register temp_reg,
goetz@6458: Label& wrong_method_type) {
goetz@6458: assert_different_registers(mtype_reg, mh_reg, temp_reg);
goetz@6458: // Compare method type against that of the receiver.
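goetz@6458: // As a sketch: if (mh_reg->type != mtype_reg) goto wrong_method_type;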
goetz@6458: load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg); goetz@6458: cmpd(CCR0, temp_reg, mtype_reg); goetz@6458: bne(CCR0, wrong_method_type); goetz@6458: } goetz@6458: goetz@6458: RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, goetz@6458: Register temp_reg, goetz@6458: int extra_slot_offset) { goetz@6458: // cf. TemplateTable::prepare_invoke(), if (load_receiver). goetz@6458: int stackElementSize = Interpreter::stackElementSize; goetz@6458: int offset = extra_slot_offset * stackElementSize; goetz@6458: if (arg_slot.is_constant()) { goetz@6458: offset += arg_slot.as_constant() * stackElementSize; goetz@6458: return offset; goetz@6458: } else { goetz@6458: assert(temp_reg != noreg, "must specify"); goetz@6458: sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); goetz@6458: if (offset != 0) goetz@6458: addi(temp_reg, temp_reg, offset); goetz@6458: return temp_reg; goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, goetz@6458: Register mark_reg, Register temp_reg, goetz@6458: Register temp2_reg, Label& done, Label* slow_case) { goetz@6458: assert(UseBiasedLocking, "why call this otherwise?"); goetz@6458: goetz@6458: #ifdef ASSERT goetz@6458: assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg); goetz@6458: #endif goetz@6458: goetz@6458: Label cas_label; goetz@6458: goetz@6458: // Branch to done if fast path fails and no slow_case provided. goetz@6458: Label *slow_case_int = (slow_case != NULL) ? slow_case : &done; goetz@6458: goetz@6458: // Biased locking goetz@6458: // See whether the lock is currently biased toward our thread and goetz@6458: // whether the epoch is still valid goetz@6458: // Note that the runtime guarantees sufficient alignment of JavaThread goetz@6458: // pointers to allow age to be placed into low bits goetz@6458: assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, goetz@6458: "biased locking makes assumptions about bit layout"); goetz@6458: goetz@6458: if (PrintBiasedLockingStatistics) { goetz@6458: load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg); goetz@6458: lwz(temp2_reg, 0, temp_reg); goetz@6458: addi(temp2_reg, temp2_reg, 1); goetz@6458: stw(temp2_reg, 0, temp_reg); goetz@6458: } goetz@6458: goetz@6458: andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place); goetz@6458: cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern); goetz@6458: bne(cr_reg, cas_label); goetz@6458: goetz@6515: load_klass(temp_reg, obj_reg); goetz@6458: goetz@6458: load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place)); goetz@6458: ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg); goetz@6458: orr(temp_reg, R16_thread, temp_reg); goetz@6458: xorr(temp_reg, mark_reg, temp_reg); goetz@6458: andr(temp_reg, temp_reg, temp2_reg); goetz@6458: cmpdi(cr_reg, temp_reg, 0); goetz@6458: if (PrintBiasedLockingStatistics) { goetz@6458: Label l; goetz@6458: bne(cr_reg, l); goetz@6458: load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr()); goetz@6458: lwz(temp2_reg, 0, mark_reg); goetz@6458: addi(temp2_reg, temp2_reg, 1); goetz@6458: stw(temp2_reg, 0, mark_reg); goetz@6458: // restore mark_reg goetz@6458: ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); goetz@6458: bind(l); goetz@6458: } goetz@6458: beq(cr_reg, done); goetz@6458: 
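goetz@6458: // Mark word layout with biased locking on 64-bit (sketch, see markOop.hpp):
goetz@6458: //   [ JavaThread* (54) | epoch (2) | age (4) | biased_lock (1) | lock (2) ]
goetz@6458: // temp_reg now holds the bits in which mark_reg differs from
goetz@6458: // (prototype header | R16_thread), age excluded; the tests below inspect
goetz@6458: // these bits to choose between revoking and rebiasing.
goetz@6458: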
goetz@6458: Label try_revoke_bias;
goetz@6458: Label try_rebias;
goetz@6458:
goetz@6458: // At this point we know that the header has the bias pattern and
goetz@6458: // that we are not the bias owner in the current epoch. We need to
goetz@6458: // figure out more details about the state of the header in order to
goetz@6458: // know what operations can be legally performed on the object's
goetz@6458: // header.
goetz@6458:
goetz@6458: // If the low three bits in the xor result aren't clear, that means
goetz@6458: // the prototype header is no longer biased and we have to revoke
goetz@6458: // the bias on this object.
goetz@6458: andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
goetz@6458: cmpwi(cr_reg, temp2_reg, 0);
goetz@6458: bne(cr_reg, try_revoke_bias);
goetz@6458:
goetz@6458: // Biasing is still enabled for this data type. See whether the
goetz@6458: // epoch of the current bias is still valid, meaning that the epoch
goetz@6458: // bits of the mark word are equal to the epoch bits of the
goetz@6458: // prototype header. (Note that the prototype header's epoch bits
goetz@6458: // only change at a safepoint.) If not, attempt to rebias the object
goetz@6458: // toward the current thread. Note that we must be absolutely sure
goetz@6458: // that the current epoch is invalid in order to do this because
goetz@6458: // otherwise the manipulations it performs on the mark word are
goetz@6458: // illegal.
goetz@6458:
goetz@6458: int shift_amount = 64 - markOopDesc::epoch_shift;
goetz@6458: // rotate epoch bits to right (little) end and set other bits to 0
goetz@6458: // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
goetz@6458: rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
goetz@6458: // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
goetz@6458: bne(CCR0, try_rebias);
goetz@6458:
goetz@6458: // The epoch of the current bias is still valid but we know nothing
goetz@6458: // about the owner; it might be set or it might be clear. Try to
goetz@6458: // acquire the bias of the object using an atomic operation. If this
goetz@6458: // fails we will go into the runtime to revoke the object's bias.
goetz@6458: // Note that we first construct the presumed unbiased header so we
goetz@6458: // don't accidentally blow away another thread's valid bias.
goetz@6458: andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
goetz@6458: markOopDesc::age_mask_in_place |
goetz@6458: markOopDesc::epoch_mask_in_place));
goetz@6458: orr(temp_reg, R16_thread, mark_reg);
goetz@6458:
goetz@6458: assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458:
goetz@6458: // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
goetz@6458: fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
goetz@6458: cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
goetz@6458: /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
goetz@6458: /*where=*/obj_reg,
goetz@6458: MacroAssembler::MemBarAcq,
goetz@6458: MacroAssembler::cmpxchgx_hint_acquire_lock(),
goetz@6458: noreg, slow_case_int); // bail out if failed
goetz@6458:
goetz@6458: // If the biasing toward our thread failed, this means that
goetz@6458: // another thread succeeded in biasing it toward itself and we
goetz@6458: // need to revoke that bias. The revocation will occur in the
goetz@6458: // interpreter runtime in the slow case.
goetz@6458: if (PrintBiasedLockingStatistics) { goetz@6458: load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg); goetz@6458: lwz(temp2_reg, 0, temp_reg); goetz@6458: addi(temp2_reg, temp2_reg, 1); goetz@6458: stw(temp2_reg, 0, temp_reg); goetz@6458: } goetz@6458: b(done); goetz@6458: goetz@6458: bind(try_rebias); goetz@6458: // At this point we know the epoch has expired, meaning that the goetz@6458: // current "bias owner", if any, is actually invalid. Under these goetz@6458: // circumstances _only_, we are allowed to use the current header's goetz@6458: // value as the comparison value when doing the cas to acquire the goetz@6458: // bias in the current epoch. In other words, we allow transfer of goetz@6458: // the bias from one thread to another directly in this situation. goetz@6458: andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place); goetz@6458: orr(temp_reg, R16_thread, temp_reg); goetz@6515: load_klass(temp2_reg, obj_reg); goetz@6458: ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg); goetz@6458: orr(temp_reg, temp_reg, temp2_reg); goetz@6458: goetz@6458: assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); goetz@6458: goetz@6458: // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg). goetz@6458: fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ? goetz@6458: cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg, goetz@6458: /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg, goetz@6458: /*where=*/obj_reg, goetz@6458: MacroAssembler::MemBarAcq, goetz@6458: MacroAssembler::cmpxchgx_hint_acquire_lock(), goetz@6458: noreg, slow_case_int); // bail out if failed goetz@6458: goetz@6458: // If the biasing toward our thread failed, this means that goetz@6458: // another thread succeeded in biasing it toward itself and we goetz@6458: // need to revoke that bias. The revocation will occur in the goetz@6458: // interpreter runtime in the slow case. goetz@6458: if (PrintBiasedLockingStatistics) { goetz@6458: load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg); goetz@6458: lwz(temp2_reg, 0, temp_reg); goetz@6458: addi(temp2_reg, temp2_reg, 1); goetz@6458: stw(temp2_reg, 0, temp_reg); goetz@6458: } goetz@6458: b(done); goetz@6458: goetz@6458: bind(try_revoke_bias); goetz@6458: // The prototype mark in the klass doesn't have the bias bit set any goetz@6458: // more, indicating that objects of this data type are not supposed goetz@6458: // to be biased any more. We are going to try to reset the mark of goetz@6458: // this object to the prototype value and fall through to the goetz@6458: // CAS-based locking scheme. Note that if our CAS fails, it means goetz@6458: // that another thread raced us for the privilege of revoking the goetz@6458: // bias of this particular object, so it's okay to continue in the goetz@6458: // normal locking code. goetz@6515: load_klass(temp_reg, obj_reg); goetz@6458: ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg); goetz@6458: andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place); goetz@6458: orr(temp_reg, temp_reg, temp2_reg); goetz@6458: goetz@6458: assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); goetz@6458: goetz@6458: // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg). goetz@6458: fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ? 
goetz@6458: cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg, goetz@6458: /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg, goetz@6458: /*where=*/obj_reg, goetz@6458: MacroAssembler::MemBarAcq, goetz@6458: MacroAssembler::cmpxchgx_hint_acquire_lock()); goetz@6458: goetz@6458: // reload markOop in mark_reg before continuing with lightweight locking goetz@6458: ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); goetz@6458: goetz@6458: // Fall through to the normal CAS-based lock, because no matter what goetz@6458: // the result of the above CAS, some thread must have succeeded in goetz@6458: // removing the bias bit from the object's header. goetz@6458: if (PrintBiasedLockingStatistics) { goetz@6458: Label l; goetz@6458: bne(cr_reg, l); goetz@6458: load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg); goetz@6458: lwz(temp2_reg, 0, temp_reg); goetz@6458: addi(temp2_reg, temp2_reg, 1); goetz@6458: stw(temp2_reg, 0, temp_reg); goetz@6458: bind(l); goetz@6458: } goetz@6458: goetz@6458: bind(cas_label); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) { goetz@6458: // Check for biased locking unlock case, which is a no-op goetz@6458: // Note: we do not have to check the thread ID for two reasons. goetz@6458: // First, the interpreter checks for IllegalMonitorStateException at goetz@6458: // a higher level. Second, if the bias was revoked while we held the goetz@6458: // lock, the object could not be rebiased toward another thread, so goetz@6458: // the bias bit would be clear. goetz@6458: goetz@6458: ld(temp_reg, 0, mark_addr); goetz@6458: andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); goetz@6458: goetz@6458: cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern); goetz@6458: beq(cr_reg, done); goetz@6458: } goetz@6458: goetz@6458: // "The box" is the space on the stack where we copy the object mark. goetz@6458: void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box, goetz@6458: Register temp, Register displaced_header, Register current_header) { goetz@6458: assert_different_registers(oop, box, temp, displaced_header, current_header); goetz@6458: assert(flag != CCR0, "bad condition register"); goetz@6458: Label cont; goetz@6458: Label object_has_monitor; goetz@6458: Label cas_failed; goetz@6458: goetz@6458: // Load markOop from object into displaced_header. goetz@6458: ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop); goetz@6458: goetz@6458: goetz@6458: // Always do locking in runtime. goetz@6458: if (EmitSync & 0x01) { goetz@6458: cmpdi(flag, oop, 0); // Oop can't be 0 here => always false. goetz@6458: return; goetz@6458: } goetz@6458: goetz@6458: if (UseBiasedLocking) { goetz@6458: biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont); goetz@6458: } goetz@6458: goetz@6458: // Handle existing monitor. goetz@6458: if ((EmitSync & 0x02) == 0) { goetz@6458: // The object has an existing monitor iff (mark & monitor_value) != 0. goetz@6458: andi_(temp, displaced_header, markOopDesc::monitor_value); goetz@6458: bne(CCR0, object_has_monitor); goetz@6458: } goetz@6458: goetz@6458: // Set displaced_header to be (markOop of object | UNLOCK_VALUE). goetz@6458: ori(displaced_header, displaced_header, markOopDesc::unlocked_value); goetz@6458: goetz@6458: // Load Compare Value application register. goetz@6458: goetz@6458: // Initialize the box. 
(Must happen before we update the object mark!) goetz@6458: std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box); goetz@6458: goetz@6458: // Must fence, otherwise, preceding store(s) may float below cmpxchg. goetz@6458: // Compare object markOop with mark and if equal exchange scratch1 with object markOop. goetz@6458: // CmpxchgX sets cr_reg to cmpX(current, displaced). goetz@6501: membar(Assembler::StoreStore); goetz@6458: cmpxchgd(/*flag=*/flag, goetz@6458: /*current_value=*/current_header, goetz@6458: /*compare_value=*/displaced_header, goetz@6458: /*exchange_value=*/box, goetz@6458: /*where=*/oop, goetz@6501: MacroAssembler::MemBarAcq, goetz@6458: MacroAssembler::cmpxchgx_hint_acquire_lock(), goetz@6458: noreg, goetz@6458: &cas_failed); goetz@6458: assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); goetz@6458: goetz@6458: // If the compare-and-exchange succeeded, then we found an unlocked goetz@6458: // object and we have now locked it. goetz@6458: b(cont); goetz@6458: goetz@6458: bind(cas_failed); goetz@6458: // We did not see an unlocked object so try the fast recursive case. goetz@6458: goetz@6458: // Check if the owner is self by comparing the value in the markOop of object goetz@6458: // (current_header) with the stack pointer. goetz@6458: sub(current_header, current_header, R1_SP); goetz@6458: load_const_optimized(temp, (address) (~(os::vm_page_size()-1) | goetz@6458: markOopDesc::lock_mask_in_place)); goetz@6458: goetz@6458: and_(R0/*==0?*/, current_header, temp); goetz@6458: // If condition is true we are cont and hence we can store 0 as the goetz@6458: // displaced header in the box, which indicates that it is a recursive lock. goetz@6458: mcrf(flag,CCR0); goetz@6458: std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box); goetz@6458: goetz@6458: // Handle existing monitor. goetz@6458: if ((EmitSync & 0x02) == 0) { goetz@6458: b(cont); goetz@6458: goetz@6458: bind(object_has_monitor); goetz@6458: // The object's monitor m is unlocked iff m->owner == NULL, goetz@6458: // otherwise m->owner may contain a thread or a stack address. goetz@6458: // goetz@6458: // Try to CAS m->owner from NULL to current thread. goetz@6458: addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value); goetz@6458: li(displaced_header, 0); goetz@6458: // CmpxchgX sets flag to cmpX(current, displaced). goetz@6458: cmpxchgd(/*flag=*/flag, goetz@6458: /*current_value=*/current_header, goetz@6458: /*compare_value=*/displaced_header, goetz@6458: /*exchange_value=*/R16_thread, goetz@6458: /*where=*/temp, goetz@6458: MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, goetz@6458: MacroAssembler::cmpxchgx_hint_acquire_lock()); goetz@6458: goetz@6458: // Store a non-null value into the box. goetz@6458: std(box, BasicLock::displaced_header_offset_in_bytes(), box); goetz@6458: goetz@6458: # ifdef ASSERT goetz@6458: bne(flag, cont); goetz@6458: // We have acquired the monitor, check some invariants. goetz@6458: addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes()); goetz@6458: // Invariant 1: _recursions should be 0. goetz@6458: //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size"); goetz@6458: asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp, goetz@6458: "monitor->_recursions should be 0", -1); goetz@6458: // Invariant 2: OwnerIsThread shouldn't be 0. 
goetz@6458: //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
goetz@6458: //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
goetz@6458: // "monitor->OwnerIsThread shouldn't be 0", -1);
goetz@6458: # endif
goetz@6458: }
goetz@6458:
goetz@6458: bind(cont);
goetz@6458: // flag == EQ indicates success
goetz@6458: // flag == NE indicates failure
goetz@6458: }
goetz@6458:
goetz@6458: void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
goetz@6458: Register temp, Register displaced_header, Register current_header) {
goetz@6458: assert_different_registers(oop, box, temp, displaced_header, current_header);
goetz@6458: assert(flag != CCR0, "bad condition register");
goetz@6458: Label cont;
goetz@6458: Label object_has_monitor;
goetz@6458:
goetz@6458: // Always do locking in runtime.
goetz@6458: if (EmitSync & 0x01) {
goetz@6458: cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
goetz@6458: return;
goetz@6458: }
goetz@6458:
goetz@6458: if (UseBiasedLocking) {
goetz@6458: biased_locking_exit(flag, oop, current_header, cont);
goetz@6458: }
goetz@6458:
goetz@6458: // Find the lock address and load the displaced header from the stack.
goetz@6458: ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
goetz@6458:
goetz@6458: // If the displaced header is 0, we have a recursive unlock.
goetz@6458: cmpdi(flag, displaced_header, 0);
goetz@6458: beq(flag, cont);
goetz@6458:
goetz@6458: // Handle existing monitor.
goetz@6458: if ((EmitSync & 0x02) == 0) {
goetz@6458: // The object has an existing monitor iff (mark & monitor_value) != 0.
goetz@6458: ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
goetz@6458: andi(temp, current_header, markOopDesc::monitor_value);
goetz@6458: cmpdi(flag, temp, 0);
goetz@6458: bne(flag, object_has_monitor);
goetz@6458: }
goetz@6458:
goetz@6458:
goetz@6458: // Check if it is still a lightweight lock; this is true if we see
goetz@6458: // the stack address of the basicLock in the markOop of the object.
goetz@6458: // Cmpxchg sets flag to cmpd(current_header, box).
goetz@6458: cmpxchgd(/*flag=*/flag,
goetz@6458: /*current_value=*/current_header,
goetz@6458: /*compare_value=*/box,
goetz@6458: /*exchange_value=*/displaced_header,
goetz@6458: /*where=*/oop,
goetz@6458: MacroAssembler::MemBarRel,
goetz@6458: MacroAssembler::cmpxchgx_hint_release_lock(),
goetz@6458: noreg,
goetz@6458: &cont);
goetz@6458:
goetz@6458: assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458:
goetz@6458: // Handle existing monitor.
goetz@6458: if ((EmitSync & 0x02) == 0) {
goetz@6458: b(cont);
goetz@6458:
goetz@6458: bind(object_has_monitor);
goetz@6458: addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
goetz@6458: ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
goetz@6458: ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
goetz@6458: xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
goetz@6458: orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
goetz@6458: cmpdi(flag, temp, 0);
goetz@6458: bne(flag, cont);
goetz@6458:
goetz@6458: ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
goetz@6458: ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
goetz@6458: orr(temp, temp, displaced_header); // Will be 0 if both are 0.
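goetz@6458: // temp == 0 iff both EntryList and cxq are empty, i.e. no thread is
goetz@6458: // waiting; only in that case may the owner field simply be cleared
goetz@6458: // below (release(); std of 0). Otherwise exit via cont with flag == NE.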
goetz@6458: cmpdi(flag, temp, 0); goetz@6458: bne(flag, cont); goetz@6458: release(); goetz@6458: std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header); goetz@6458: } goetz@6458: goetz@6458: bind(cont); goetz@6458: // flag == EQ indicates success goetz@6458: // flag == NE indicates failure goetz@6458: } goetz@6458: goetz@6458: // Write serialization page so VM thread can do a pseudo remote membar. goetz@6458: // We use the current thread pointer to calculate a thread specific goetz@6458: // offset to write to within the page. This minimizes bus traffic goetz@6458: // due to cache line collision. goetz@6458: void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { goetz@6458: srdi(tmp2, thread, os::get_serialize_page_shift_count()); goetz@6458: goetz@6458: int mask = os::vm_page_size() - sizeof(int); goetz@6458: if (Assembler::is_simm(mask, 16)) { goetz@6458: andi(tmp2, tmp2, mask); goetz@6458: } else { goetz@6458: lis(tmp1, (int)((signed short) (mask >> 16))); goetz@6458: ori(tmp1, tmp1, mask & 0x0000ffff); goetz@6458: andr(tmp2, tmp2, tmp1); goetz@6458: } goetz@6458: goetz@6458: load_const(tmp1, (long) os::get_memory_serialize_page()); goetz@6458: release(); goetz@6458: stwx(R0, tmp1, tmp2); goetz@6458: } goetz@6458: goetz@6458: goetz@6458: // GC barrier helper macros goetz@6458: goetz@6458: // Write the card table byte if needed. goetz@6458: void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) { goetz@6458: CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); goetz@6458: assert(bs->kind() == BarrierSet::CardTableModRef || goetz@6458: bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); goetz@6458: #ifdef ASSERT goetz@6458: cmpdi(CCR0, Rnew_val, 0); goetz@6458: asm_assert_ne("null oop not allowed", 0x321); goetz@6458: #endif goetz@6458: card_table_write(bs->byte_map_base, Rtmp, Rstore_addr); goetz@6458: } goetz@6458: goetz@6458: // Write the card table byte. goetz@6458: void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) { goetz@6458: assert_different_registers(Robj, Rtmp, R0); goetz@6458: load_const_optimized(Rtmp, (address)byte_map_base, R0); goetz@6458: srdi(Robj, Robj, CardTableModRefBS::card_shift); goetz@6458: li(R0, 0); // dirty goetz@6501: if (UseConcMarkSweepGC) membar(Assembler::StoreStore); goetz@6458: stbx(R0, Rtmp, Robj); goetz@6458: } goetz@6458: goetz@6515: #if INCLUDE_ALL_GCS goetz@6458: // General G1 pre-barrier generator. goetz@6458: // Goal: record the previous value if it is not null. goetz@6458: void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val, goetz@6458: Register Rtmp1, Register Rtmp2, bool needs_frame) { goetz@6458: Label runtime, filtered; goetz@6458: goetz@6458: // Is marking active? goetz@6458: if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { goetz@6458: lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread); goetz@6458: } else { goetz@6458: guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); goetz@6458: lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread); goetz@6458: } goetz@6458: cmpdi(CCR0, Rtmp1, 0); goetz@6458: beq(CCR0, filtered); goetz@6458: goetz@6458: // Do we need to load the previous value? goetz@6458: if (Robj != noreg) { goetz@6458: // Load the previous value... 
goetz@6458: if (UseCompressedOops) {
goetz@6458: lwz(Rpre_val, offset, Robj);
goetz@6458: } else {
goetz@6458: ld(Rpre_val, offset, Robj);
goetz@6458: }
goetz@6458: // Previous value has been loaded into Rpre_val.
goetz@6458: }
goetz@6458: assert(Rpre_val != noreg, "must have a real register");
goetz@6458:
goetz@6458: // Is the previous value null?
goetz@6458: cmpdi(CCR0, Rpre_val, 0);
goetz@6458: beq(CCR0, filtered);
goetz@6458:
goetz@6458: if (Robj != noreg && UseCompressedOops) {
goetz@6458: decode_heap_oop_not_null(Rpre_val);
goetz@6458: }
goetz@6458:
goetz@6458: // OK, it's not filtered, so we'll need to call enqueue. In the normal
goetz@6458: // case, pre_val will be a scratch G-reg, but there are some cases in
goetz@6458: // which it's an O-reg. In the first case, do a normal call. In the
goetz@6458: // latter, do a save here and call the frameless version.
goetz@6458:
goetz@6458: // Can we store original value in the thread's buffer?
goetz@6458: // Is index == 0?
goetz@6458: // (The index field is typed as size_t.)
goetz@6458: const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
goetz@6458:
goetz@6458: ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458: cmpdi(CCR0, Rindex, 0);
goetz@6458: beq(CCR0, runtime); // If index == 0, goto runtime.
goetz@6458: ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
goetz@6458:
goetz@6458: addi(Rindex, Rindex, -wordSize); // Decrement index.
goetz@6458: std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458:
goetz@6458: // Record the previous value.
goetz@6458: stdx(Rpre_val, Rbuffer, Rindex);
goetz@6458: b(filtered);
goetz@6458:
goetz@6458: bind(runtime);
goetz@6458:
goetz@6458: // The VM call needs a frame to access (write) O registers.
goetz@6458: if (needs_frame) {
goetz@6458: save_LR_CR(Rtmp1);
goetz@6511: push_frame_reg_args(0, Rtmp2);
goetz@6458: }
goetz@6458:
goetz@6458: if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
goetz@6458: call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
goetz@6458: if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
goetz@6458:
goetz@6458: if (needs_frame) {
goetz@6458: pop_frame();
goetz@6458: restore_LR_CR(Rtmp1);
goetz@6458: }
goetz@6458:
goetz@6458: bind(filtered);
goetz@6458: }
goetz@6458:
goetz@6458: // General G1 post-barrier generator
goetz@6458: // Store cross-region card.
goetz@6458: void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
goetz@6458: Label runtime, filtered_int;
goetz@6458: Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
goetz@6458: assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
goetz@6458:
goetz@6458: G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
goetz@6458: assert(bs->kind() == BarrierSet::G1SATBCT ||
goetz@6458: bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
goetz@6458:
goetz@6458: // Does store cross heap regions?
goetz@6458: if (G1RSBarrierRegionFilter) {
goetz@6458: xorr(Rtmp1, Rstore_addr, Rnew_val);
goetz@6458: srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
goetz@6458: beq(CCR0, filtered);
goetz@6458: }
goetz@6458:
goetz@6458: // Crosses regions, storing NULL?
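goetz@6458: // (NULL stores were already filtered by the caller on PPC64; the assert
goetz@6458: // below only documents that.) Remaining filter chain, as a sketch:
goetz@6458: //   if (card(store_addr) == g1_young_card_val) return;
goetz@6458: //   StoreLoad barrier;
goetz@6458: //   if (card(store_addr) == dirty_card_val) return;
goetz@6458: //   card = dirty; enqueue card address, or call g1_wb_post in the runtime.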
goetz@6458: #ifdef ASSERT
goetz@6458: cmpdi(CCR0, Rnew_val, 0);
goetz@6458: asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
goetz@6458: //beq(CCR0, filtered);
goetz@6458: #endif
goetz@6458:
goetz@6458: // Storing region crossing non-NULL, is card already dirty?
goetz@6458: assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
goetz@6458: const Register Rcard_addr = Rtmp1;
goetz@6458: Register Rbase = Rtmp2;
goetz@6458: load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
goetz@6458:
goetz@6458: srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
goetz@6458:
goetz@6458: // Load the value of the card.
goetz@6458: lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
goetz@6515: cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
goetz@6515: beq(CCR0, filtered);
goetz@6515:
goetz@6515: membar(Assembler::StoreLoad);
goetz@6515: lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
goetz@6515: cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
goetz@6458: beq(CCR0, filtered);
goetz@6458:
goetz@6458: // Storing a region crossing, non-NULL oop, card is clean.
goetz@6458: // Dirty card and log.
goetz@6515: li(Rtmp3, CardTableModRefBS::dirty_card_val());
goetz@6458: //release(); // G1: oops are allowed to get visible after dirty marking.
goetz@6458: stbx(Rtmp3, Rbase, Rcard_addr);
goetz@6458:
goetz@6458: add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
goetz@6458: Rbase = noreg; // end of lifetime
goetz@6458:
goetz@6458: const Register Rqueue_index = Rtmp2,
goetz@6458: Rqueue_buf = Rtmp3;
goetz@6458: ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458: cmpdi(CCR0, Rqueue_index, 0);
goetz@6458: beq(CCR0, runtime); // If index == 0, jump to runtime.
goetz@6458: ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
goetz@6458:
goetz@6458: addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
goetz@6458: std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458:
goetz@6458: stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
goetz@6458: b(filtered);
goetz@6458:
goetz@6458: bind(runtime);
goetz@6458:
goetz@6458: // Save the live input values.
goetz@6458: call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
goetz@6458:
goetz@6458: bind(filtered_int);
goetz@6458: }
goetz@6515: #endif // INCLUDE_ALL_GCS
goetz@6458:
goetz@6458: // Values for last_Java_pc and last_Java_sp must comply with the rules
goetz@6458: // in frame_ppc64.hpp.
goetz@6458: void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
goetz@6458: // Always set last_Java_pc and flags first because once last_Java_sp
goetz@6458: // is visible has_last_Java_frame is true and users will look at the
goetz@6458: // rest of the fields. (Note: flags should always be zero before we
goetz@6458: // get here so they don't need to be set.)
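goetz@6458: //
goetz@6458: // Resulting store order, as a sketch:
goetz@6458: //   thread->_last_Java_pc = pc;  // first
goetz@6458: //   thread->_last_Java_sp = sp;  // last; this publishes the frame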
goetz@6458: goetz@6458: // Verify that last_Java_pc was zeroed on return to Java goetz@6458: asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread, goetz@6458: "last_Java_pc not zeroed before leaving Java", 0x200); goetz@6458: goetz@6458: // When returning from calling out from Java mode the frame anchor's goetz@6458: // last_Java_pc will always be set to NULL. It is set here so that goetz@6458: // if we are doing a call to native (not VM) that we capture the goetz@6458: // known pc and don't have to rely on the native call having a goetz@6458: // standard frame linkage where we can find the pc. goetz@6458: if (last_Java_pc != noreg) goetz@6458: std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread); goetz@6458: goetz@6495: // Set last_Java_sp last. goetz@6458: std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::reset_last_Java_frame(void) { goetz@6458: asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), goetz@6458: R16_thread, "SP was not set, still zero", 0x202); goetz@6458: goetz@6458: BLOCK_COMMENT("reset_last_Java_frame {"); goetz@6458: li(R0, 0); goetz@6458: goetz@6458: // _last_Java_sp = 0 goetz@6458: std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread); goetz@6458: goetz@6458: // _last_Java_pc = 0 goetz@6458: std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread); goetz@6458: BLOCK_COMMENT("} reset_last_Java_frame"); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) { goetz@6458: assert_different_registers(sp, tmp1); goetz@6458: goetz@6458: // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via goetz@6458: // TOP_IJAVA_FRAME_ABI. goetz@6458: // FIXME: assert that we really have a TOP_IJAVA_FRAME here! goetz@6458: #ifdef CC_INTERP goetz@6458: ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp); goetz@6458: #else goetz@6512: address entry = pc(); goetz@6512: load_const_optimized(tmp1, entry); goetz@6458: #endif goetz@6458: goetz@6458: set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::get_vm_result(Register oop_result) { goetz@6458: // Read: goetz@6458: // R16_thread goetz@6458: // R16_thread->in_bytes(JavaThread::vm_result_offset()) goetz@6458: // goetz@6458: // Updated: goetz@6458: // oop_result goetz@6458: // R16_thread->in_bytes(JavaThread::vm_result_offset()) goetz@6458: goetz@6458: ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread); goetz@6458: li(R0, 0); goetz@6458: std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread); goetz@6458: goetz@6458: verify_oop(oop_result); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::get_vm_result_2(Register metadata_result) { goetz@6458: // Read: goetz@6458: // R16_thread goetz@6458: // R16_thread->in_bytes(JavaThread::vm_result_2_offset()) goetz@6458: // goetz@6458: // Updated: goetz@6458: // metadata_result goetz@6458: // R16_thread->in_bytes(JavaThread::vm_result_2_offset()) goetz@6458: goetz@6458: ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread); goetz@6458: li(R0, 0); goetz@6458: std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread); goetz@6458: } goetz@6458: goetz@6458: goetz@6458: void MacroAssembler::encode_klass_not_null(Register dst, Register src) { goetz@6501: Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. 
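goetz@6501: // Encoding sketch: narrow = (klass - narrow_klass_base()) >> narrow_klass_shift();
goetz@6501: // decode_klass_not_null() below computes the inverse.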
goetz@6477: if (Universe::narrow_klass_base() != 0) { goetz@6515: // Use dst as temp if it is free. goetz@6515: load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg); goetz@6501: sub(dst, current, R0); goetz@6501: current = dst; goetz@6477: } goetz@6501: if (Universe::narrow_klass_shift() != 0) { goetz@6501: srdi(dst, current, Universe::narrow_klass_shift()); goetz@6501: current = dst; goetz@6458: } goetz@6501: mr_if_needed(dst, current); // Move may be required. goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) { goetz@6474: if (UseCompressedClassPointers) { goetz@6458: encode_klass_not_null(ck, klass); goetz@6458: stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop); goetz@6458: } else { goetz@6458: std(klass, oopDesc::klass_offset_in_bytes(), dst_oop); goetz@6458: } goetz@6458: } goetz@6458: goetz@6512: void MacroAssembler::store_klass_gap(Register dst_oop, Register val) { goetz@6512: if (UseCompressedClassPointers) { goetz@6512: if (val == noreg) { goetz@6512: val = R0; goetz@6512: li(val, 0); goetz@6512: } goetz@6512: stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed goetz@6512: } goetz@6512: } goetz@6512: goetz@6477: int MacroAssembler::instr_size_for_decode_klass_not_null() { goetz@6477: if (!UseCompressedClassPointers) return 0; goetz@6477: int num_instrs = 1; // shift or move goetz@6477: if (Universe::narrow_klass_base() != 0) num_instrs = 7; // shift + load const + add goetz@6477: return num_instrs * BytesPerInstWord; goetz@6477: } goetz@6477: goetz@6458: void MacroAssembler::decode_klass_not_null(Register dst, Register src) { goetz@6458: if (src == noreg) src = dst; goetz@6463: Register shifted_src = src; goetz@6477: if (Universe::narrow_klass_shift() != 0 || goetz@6477: Universe::narrow_klass_base() == 0 && src != dst) { // Move required. goetz@6463: shifted_src = dst; goetz@6463: sldi(shifted_src, src, Universe::narrow_klass_shift()); goetz@6458: } goetz@6477: if (Universe::narrow_klass_base() != 0) { goetz@6477: load_const(R0, Universe::narrow_klass_base()); goetz@6477: add(dst, shifted_src, R0); goetz@6477: } goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::load_klass(Register dst, Register src) { goetz@6474: if (UseCompressedClassPointers) { goetz@6458: lwz(dst, oopDesc::klass_offset_in_bytes(), src); goetz@6458: // Attention: no null check here! goetz@6458: decode_klass_not_null(dst, dst); goetz@6458: } else { goetz@6458: ld(dst, oopDesc::klass_offset_in_bytes(), src); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) { goetz@6486: if (!os::zero_page_read_protected()) { goetz@6458: if (TrapBasedNullChecks) { goetz@6458: trap_null_check(src); goetz@6458: } goetz@6458: } goetz@6458: load_klass(dst, src); goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::reinit_heapbase(Register d, Register tmp) { goetz@6463: if (Universe::heap() != NULL) { goetz@6463: if (Universe::narrow_oop_base() == NULL) { goetz@6463: Assembler::xorr(R30, R30, R30); goetz@6463: } else { goetz@6463: load_const(R30, Universe::narrow_ptrs_base(), tmp); goetz@6463: } goetz@6463: } else { goetz@6458: load_const(R30, Universe::narrow_ptrs_base_addr(), tmp); goetz@6458: ld(R30, 0, R30); goetz@6458: } goetz@6458: } goetz@6458: goetz@6495: // Clear Array goetz@6495: // Kills both input registers. tmp == R0 is allowed. 
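goetz@6495: // Illustrative C-level equivalent (hypothetical helper, not part of this file):
goetz@6495: //   void clear_dwords(uint64_t* base_ptr, size_t cnt_dwords) {
goetz@6495: //     for (size_t i = 0; i < cnt_dwords; ++i) base_ptr[i] = 0;
goetz@6495: //   }
goetz@6495: // except that cache-line-aligned interior blocks are cleared with dcbz,
goetz@6495: // one whole line per instruction.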
goetz@6495: void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
goetz@6495: // Procedure for large arrays (uses data cache block zero instruction).
goetz@6495: Label startloop, fast, fastloop, small_rest, restloop, done;
goetz@6495: const int cl_size = VM_Version::get_cache_line_size(),
goetz@6495: cl_dwords = cl_size>>3,
goetz@6495: cl_dw_addr_bits = exact_log2(cl_dwords),
goetz@6495: dcbz_min = 1; // Min count of dcbz executions, needs to be >0.
goetz@6495:
goetz@6495: //2:
goetz@6495: cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
goetz@6495: blt(CCR1, small_rest); // Too small.
goetz@6495: rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
goetz@6495: beq(CCR0, fast); // Already 128byte aligned.
goetz@6495:
goetz@6495: subfic(tmp, tmp, cl_dwords);
goetz@6495: mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
goetz@6495: subf(cnt_dwords, tmp, cnt_dwords); // rest.
goetz@6495: li(tmp, 0);
goetz@6495: //10:
goetz@6495: bind(startloop); // Clear at the beginning to reach 128byte boundary.
goetz@6495: std(tmp, 0, base_ptr); // Clear 8byte aligned block.
goetz@6495: addi(base_ptr, base_ptr, 8);
goetz@6495: bdnz(startloop);
goetz@6495: //13:
goetz@6495: bind(fast); // Clear 128byte blocks.
goetz@6495: srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0).
goetz@6495: andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
goetz@6495: mtctr(tmp); // Load counter.
goetz@6495: //16:
goetz@6495: bind(fastloop);
goetz@6495: dcbz(base_ptr); // Clear 128byte aligned block.
goetz@6495: addi(base_ptr, base_ptr, cl_size);
goetz@6495: bdnz(fastloop);
goetz@6495: if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
goetz@6495: //20:
goetz@6495: bind(small_rest);
goetz@6495: cmpdi(CCR0, cnt_dwords, 0); // size 0?
goetz@6495: beq(CCR0, done); // rest == 0
goetz@6495: li(tmp, 0);
goetz@6495: mtctr(cnt_dwords); // Load counter.
goetz@6495: //24:
goetz@6495: bind(restloop); // Clear rest.
goetz@6495: std(tmp, 0, base_ptr); // Clear 8byte aligned block.
goetz@6495: addi(base_ptr, base_ptr, 8);
goetz@6495: bdnz(restloop);
goetz@6495: //27:
goetz@6495: bind(done);
goetz@6495: }
goetz@6495:
goetz@6458: /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
goetz@6458:
goetz@6458: // Search for a single jchar in a jchar[].
goetz@6458: //
goetz@6458: // Assumes that result differs from all other registers.
goetz@6458: //
goetz@6458: // Haystack, needle are the addresses of jchar-arrays.
goetz@6458: // NeedleChar is needle[0] if it is known at compile time.
goetz@6458: // Haycnt is the length of the haystack. We assume haycnt >=1.
goetz@6458: //
goetz@6458: // Preserves haystack, haycnt, kills all other registers.
goetz@6458: //
goetz@6458: // If needle == R0, we search for the constant needleChar.
goetz@6458: void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
goetz@6458: Register needle, jchar needleChar,
goetz@6458: Register tmp1, Register tmp2) {
goetz@6458:
goetz@6458: assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
goetz@6458:
goetz@6458: Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
goetz@6458: Register needle0 = needle, // Contains needle[0].
goetz@6458: addr = tmp1,
goetz@6458: ch1 = tmp2,
goetz@6458: ch2 = R0;
goetz@6458:
goetz@6458: //2 (variable) or 3 (const):
goetz@6458: if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
goetz@6458: dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
goetz@6458:
goetz@6458: srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
goetz@6458: mr(addr, haystack);
goetz@6458: beq(CCR0, L_FinalCheck);
goetz@6458: mtctr(tmp2); // Move to count register.
goetz@6458: //8:
goetz@6458: bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
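goetz@6458: // Each trip loads two adjacent chars and compares both against the needle
goetz@6458: // char in separate condition registers (CCR0/CCR1), halving loop overhead.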
goetz@6458:     lhz(ch1, 0, addr);      // Load characters from haystack.
goetz@6458:     lhz(ch2, 2, addr);
goetz@6458:     (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
goetz@6458:     (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
goetz@6458:     beq(CCR0, L_Found1);    // Did we find the needle?
goetz@6458:     beq(CCR1, L_Found2);
goetz@6458:     addi(addr, addr, 4);
goetz@6458:     bdnz(L_InnerLoop);
goetz@6458: //16:
goetz@6458:   bind(L_FinalCheck);
goetz@6458:     andi_(R0, haycnt, 1);
goetz@6458:     beq(CCR0, L_NotFound);
goetz@6458:     lhz(ch1, 0, addr);      // One position left at which we have to compare.
goetz@6458:     (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
goetz@6458:     beq(CCR1, L_Found3);
goetz@6458: //21:
goetz@6458:   bind(L_NotFound);
goetz@6458:     li(result, -1);         // Not found.
goetz@6458:     b(L_End);
goetz@6458:
goetz@6458:   bind(L_Found2);
goetz@6458:     addi(addr, addr, 2);
goetz@6458: //24:
goetz@6458:   bind(L_Found1);
goetz@6458:   bind(L_Found3);               // Return index ...
goetz@6458:     subf(addr, haystack, addr); // relative to haystack,
goetz@6458:     srdi(result, addr, 1);      // in characters.
goetz@6458:   bind(L_End);
goetz@6458: }
goetz@6458:
goetz@6458:
goetz@6458: // Implementation of IndexOf for jchar arrays.
goetz@6458: //
goetz@6458: // The lengths of haystack and needle are not constant, i.e. passed in registers.
goetz@6458: //
goetz@6458: // Preserves registers haystack, needle.
goetz@6458: // Kills registers haycnt, needlecnt.
goetz@6458: // Assumes that result differs from all other registers.
goetz@6458: // Haystack, needle are the addresses of jchar-arrays.
goetz@6458: // Haycnt, needlecnt are their respective lengths.
goetz@6458: //
goetz@6458: // Needlecntval must be zero or a 15-bit unsigned immediate and > 1.
goetz@6458: void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
goetz@6458:                                     Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
goetz@6458:                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
goetz@6458:
goetz@6458:   // Callers must ensure 0 < needlecnt <= haycnt.
goetz@6458:   Label L_TooShort, L_Found, L_NotFound, L_End;
goetz@6458:   Register last_addr = haycnt, // Kill haycnt at the beginning.
goetz@6458:            addr      = tmp1,
goetz@6458:            n_start   = tmp2,
goetz@6458:            ch1       = tmp3,
goetz@6458:            ch2       = R0;
goetz@6458:
goetz@6458:   // **************************************************************************************************
goetz@6458:   // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
goetz@6458:   // **************************************************************************************************
goetz@6458:
goetz@6458: //1 (variable) or 3 (const):
goetz@6458:   dcbtct(needle, 0x00);    // Indicate R/O access to str1.
goetz@6458:   dcbtct(haystack, 0x00);  // Indicate R/O access to str2.
goetz@6458:
goetz@6458:   // Compute last haystack addr to use if no match gets found.
goetz@6458:   if (needlecntval == 0) { // variable needlecnt
goetz@6458: //3:
goetz@6458:     subf(ch1, needlecnt, haycnt);   // Last character index to compare is haycnt-needlecnt.
goetz@6458:     addi(addr, haystack, -2);       // Accesses use pre-increment.
goetz@6458:     cmpwi(CCR6, needlecnt, 2);
goetz@6458:     blt(CCR6, L_TooShort);          // Variable needlecnt: handle short needle separately.
goetz@6458:     slwi(ch1, ch1, 1);              // Scale to number of bytes.
goetz@6458:     lwz(n_start, 0, needle);        // Load first 2 characters of needle.
goetz@6458:     add(last_addr, haystack, ch1);  // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
goetz@6458:     addi(needlecnt, needlecnt, -2); // Rest of needle.
goetz@6458:   } else { // constant needlecnt
goetz@6458:     guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
goetz@6458:     assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
goetz@6458: //5:
goetz@6458:     addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
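goetz@6458:     // The constant-needlecnt path mirrors the variable path above; ch1 now
goetz@6458:     // holds the last valid match index and is scaled to a byte offset below.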
goetz@6458: lwz(n_start, 0, needle); // Load first 2 characters of needle. goetz@6458: addi(addr, haystack, -2); // Accesses use pre-increment. goetz@6458: slwi(ch1, ch1, 1); // Scale to number of bytes. goetz@6458: add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)). goetz@6458: li(needlecnt, needlecntval-2); // Rest of needle. goetz@6458: } goetz@6458: goetz@6458: // Main Loop (now we have at least 3 characters). goetz@6458: //11: goetz@6458: Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3; goetz@6458: bind(L_OuterLoop); // Search for 1st 2 characters. goetz@6458: Register addr_diff = tmp4; goetz@6458: subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check. goetz@6458: addi(addr, addr, 2); // This is the new address we want to use for comparing. goetz@6458: srdi_(ch2, addr_diff, 2); goetz@6458: beq(CCR0, L_FinalCheck); // 2 characters left? goetz@6458: mtctr(ch2); // addr_diff/4 goetz@6458: //16: goetz@6458: bind(L_InnerLoop); // Main work horse (2x unrolled search loop) goetz@6458: lwz(ch1, 0, addr); // Load 2 characters of haystack (ignore alignment). goetz@6458: lwz(ch2, 2, addr); goetz@6458: cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop). goetz@6458: cmpw(CCR1, ch2, n_start); goetz@6458: beq(CCR0, L_Comp1); // Did we find the needle start? goetz@6458: beq(CCR1, L_Comp2); goetz@6458: addi(addr, addr, 4); goetz@6458: bdnz(L_InnerLoop); goetz@6458: //24: goetz@6458: bind(L_FinalCheck); goetz@6458: rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1. goetz@6458: beq(CCR0, L_NotFound); goetz@6458: lwz(ch1, 0, addr); // One position left at which we have to compare. goetz@6458: cmpw(CCR1, ch1, n_start); goetz@6458: beq(CCR1, L_Comp3); goetz@6458: //29: goetz@6458: bind(L_NotFound); goetz@6458: li(result, -1); // not found goetz@6458: b(L_End); goetz@6458: goetz@6458: goetz@6458: // ************************************************************************************************** goetz@6458: // Special Case: unfortunately, the variable needle case can be called with needlecnt<2 goetz@6458: // ************************************************************************************************** goetz@6458: //31: goetz@6458: if ((needlecntval>>1) !=1 ) { // Const needlecnt is 2 or 3? Reduce code size. goetz@6458: int nopcnt = 5; goetz@6458: if (needlecntval !=0 ) ++nopcnt; // Balance alignment (other case: see below). goetz@6458: if (needlecntval == 0) { // We have to handle these cases separately. goetz@6458: Label L_OneCharLoop; goetz@6458: bind(L_TooShort); goetz@6458: mtctr(haycnt); goetz@6458: lhz(n_start, 0, needle); // First character of needle goetz@6458: bind(L_OneCharLoop); goetz@6458: lhzu(ch1, 2, addr); goetz@6458: cmpw(CCR1, ch1, n_start); goetz@6458: beq(CCR1, L_Found); // Did we find the one character needle? goetz@6458: bdnz(L_OneCharLoop); goetz@6458: li(result, -1); // Not found. goetz@6458: b(L_End); goetz@6458: } // 8 instructions, so no impact on alignment. 
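goetz@6458:   // Pad with nops so the L_Comp* code below gets the same alignment in the
goetz@6458:   // constant- and variable-needlecnt variants (see nopcnt above).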
goetz@6458:     for (int x = 0; x < nopcnt; ++x) nop();
goetz@6458:   }
goetz@6458:
goetz@6458:   // **************************************************************************************************
goetz@6458:   // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
goetz@6458:   // **************************************************************************************************
goetz@6458:
goetz@6458:   // Compare the rest
goetz@6458: //36 if needlecntval==0, else 37:
goetz@6458:   bind(L_Comp2);
goetz@6458:     addi(addr, addr, 2);   // First comparison has failed, 2nd one hit.
goetz@6458:   bind(L_Comp1);           // Addr points to possible needle start.
goetz@6458:   bind(L_Comp3);           // Could have created a copy and used a different return address, but saving code size here.
goetz@6458:   if (needlecntval != 2) { // Const needlecnt==2?
goetz@6458:     if (needlecntval != 3) {
goetz@6458:       if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
goetz@6458:       Register ind_reg = tmp4;
goetz@6458:       li(ind_reg, 2*2);   // First 2 characters are already compared, use index 2.
goetz@6458:       mtctr(needlecnt);   // Decremented by 2, still > 0.
goetz@6458: //40:
goetz@6458:       Label L_CompLoop;
goetz@6458:       bind(L_CompLoop);
goetz@6458:         lhzx(ch2, needle, ind_reg);
goetz@6458:         lhzx(ch1, addr, ind_reg);
goetz@6458:         cmpw(CCR1, ch1, ch2);
goetz@6458:         bne(CCR1, L_OuterLoop);
goetz@6458:         addi(ind_reg, ind_reg, 2);
goetz@6458:         bdnz(L_CompLoop);
goetz@6458:     } else { // No loop required if there's only one needle character left.
goetz@6458:       lhz(ch2, 2*2, needle);
goetz@6458:       lhz(ch1, 2*2, addr);
goetz@6458:       cmpw(CCR1, ch1, ch2);
goetz@6458:       bne(CCR1, L_OuterLoop);
goetz@6458:     }
goetz@6458:   }
goetz@6458:   // Return index ...
goetz@6458: //46:
goetz@6458:   bind(L_Found);
goetz@6458:     subf(addr, haystack, addr); // relative to haystack, ...
goetz@6458:     srdi(result, addr, 1);      // in characters.
goetz@6458: //48:
goetz@6458:   bind(L_End);
goetz@6458: }
goetz@6458:
goetz@6458:
goetz@6458: // Implementation of Compare for jchar arrays.
goetz@6458: //
goetz@6458: // Kills the registers str1, str2, cnt1, cnt2.
goetz@6458: // Kills cr0, ctr.
goetz@6458: // Assumes that result differs from the input registers.
goetz@6458: void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
goetz@6458:                                     Register result_reg, Register tmp_reg) {
goetz@6458:   assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
goetz@6458:
goetz@6458:   Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
goetz@6458:   Register cnt_diff  = R0,
goetz@6458:            limit_reg = cnt1_reg,
goetz@6458:            chr1_reg  = result_reg,
goetz@6458:            chr2_reg  = cnt2_reg,
goetz@6458:            addr_diff = str2_reg;
goetz@6458:
goetz@6458:   // Offset 0 should be 32 byte aligned.
goetz@6458: //-4:
goetz@6458:     dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
goetz@6458:     dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
goetz@6458: //-2:
goetz@6458:    // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
goetz@6458:     subf(result_reg, cnt2_reg, cnt1_reg); // difference between cnt1/2
goetz@6458:     subf_(addr_diff, str1_reg, str2_reg); // alias?
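goetz@6458:     // addr_diff == str2 - str1, so a single induction variable (str1_reg)
goetz@6458:     // addresses both strings via indexed loads (ldx/lhzx below). A zero
goetz@6458:     // difference means both registers point to the same array.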
goetz@6458:     beq(CCR0, Ldone);                 // return cnt difference if both ones are identical
goetz@6458:     srawi(limit_reg, result_reg, 31); // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
goetz@6458:     mr(cnt_diff, result_reg);
goetz@6458:     andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
goetz@6458:     add_(limit_reg, cnt2_reg, limit_reg);   // min(cnt1, cnt2)==0?
goetz@6458:     beq(CCR0, Ldone);                       // return cnt difference if one string is empty
goetz@6458:
goetz@6458:     addi(tmp_reg, limit_reg, -1);            // min(cnt1, cnt2)-1
goetz@6458:     srwi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
goetz@6458:     beq(CCR0, Lslow_case);                   // need >4 characters for fast loop
goetz@6458:     andi(limit_reg, tmp_reg, 4-1);           // remaining characters
goetz@6458:
goetz@6458:    // Adapt str1_reg str2_reg for the first loop iteration
goetz@6458:     mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
goetz@6458:     addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
goetz@6458: //16:
goetz@6458:    // Compare the rest of the characters
goetz@6458:   bind(Lfast_loop);
goetz@6458:     ld(chr1_reg, 0, str1_reg);
goetz@6458:     ldx(chr2_reg, str1_reg, addr_diff);
goetz@6458:     cmpd(CCR0, chr2_reg, chr1_reg);
goetz@6458:     bne(CCR0, Lslow_case); // Mismatch: recompare the dword character-wise in slow_case.
goetz@6458:     addi(str1_reg, str1_reg, 4*2);
goetz@6458:     bdnz(Lfast_loop);
goetz@6458:     addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
goetz@6458: //23:
goetz@6458:   bind(Lslow_case);
goetz@6458:     mtctr(limit_reg);
goetz@6458: //24:
goetz@6458:   bind(Lslow_loop);
goetz@6458:     lhz(chr1_reg, 0, str1_reg);
goetz@6458:     lhzx(chr2_reg, str1_reg, addr_diff);
goetz@6458:     subf_(result_reg, chr2_reg, chr1_reg);
goetz@6458:     bne(CCR0, Ldone);              // return difference of first mismatched chars
goetz@6458:     addi(str1_reg, str1_reg, 1*2);
goetz@6458:     bdnz(Lslow_loop);
goetz@6458: //30:
goetz@6458:    // If strings are equal up to min length, return the length difference.
goetz@6458:     mr(result_reg, cnt_diff);
goetz@6458:     nop(); // alignment
goetz@6458: //32:
goetz@6458:    // Otherwise, return the difference between the first mismatched chars.
goetz@6458:   bind(Ldone);
goetz@6458: }
goetz@6458:
goetz@6458:
goetz@6458: // Compare char[] arrays.
goetz@6458: //
goetz@6458: // str1_reg   USE only
goetz@6458: // str2_reg   USE only
goetz@6458: // cnt_reg    USE_DEF, due to tmp reg shortage
goetz@6458: // result_reg DEF only, might compromise USE only registers
goetz@6458: void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
goetz@6458:                                         Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
goetz@6458:                                         Register tmp5_reg) {
goetz@6458:
goetz@6458:   // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
goetz@6458:   assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
goetz@6458:   assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
goetz@6458:
goetz@6458:   // Offset 0 should be 32 byte aligned.
goetz@6458:   Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
goetz@6458:   Register index_reg = tmp5_reg;
goetz@6458:   Register cbc_iter  = tmp4_reg;
goetz@6458:
goetz@6458: //-1:
goetz@6458:     dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
goetz@6458:     dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
goetz@6458: //1:
goetz@6458:     andi(cbc_iter, cnt_reg, 4-1);            // Remaining iterations after 4 java characters per iteration loop.
goetz@6458:     li(index_reg, 0);  // init
goetz@6458:     li(result_reg, 0); // assume false
goetz@6458:     srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
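goetz@6458:     // The main loop compares 4 jchars per iteration with one 8-byte load per
goetz@6458:     // string; cbc_iter (0-3) counts the leftover characters compared in Lcbc.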
goetz@6458: goetz@6458: cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0) goetz@6458: beq(CCR0, Linit_cbc); // too short goetz@6458: mtctr(tmp2_reg); goetz@6458: //8: goetz@6458: bind(Lloop); goetz@6458: ldx(tmp1_reg, str1_reg, index_reg); goetz@6458: ldx(tmp2_reg, str2_reg, index_reg); goetz@6458: cmpd(CCR0, tmp1_reg, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); // Unequal char pair found -> done. goetz@6458: addi(index_reg, index_reg, 4*sizeof(jchar)); goetz@6458: bdnz(Lloop); goetz@6458: //14: goetz@6458: bind(Linit_cbc); goetz@6458: beq(CCR1, Ldone_true); goetz@6458: mtctr(cbc_iter); goetz@6458: //16: goetz@6458: bind(Lcbc); goetz@6458: lhzx(tmp1_reg, str1_reg, index_reg); goetz@6458: lhzx(tmp2_reg, str2_reg, index_reg); goetz@6458: cmpw(CCR0, tmp1_reg, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); // Unequal char pair found -> done. goetz@6458: addi(index_reg, index_reg, 1*sizeof(jchar)); goetz@6458: bdnz(Lcbc); goetz@6458: nop(); goetz@6458: bind(Ldone_true); goetz@6458: li(result_reg, 1); goetz@6458: //24: goetz@6458: bind(Ldone_false); goetz@6458: } goetz@6458: goetz@6458: goetz@6458: void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg, goetz@6458: Register tmp1_reg, Register tmp2_reg) { goetz@6458: // Str1 may be the same register as str2 which can occur e.g. after scalar replacement. goetz@6458: assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg); goetz@6458: assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg); goetz@6458: assert(sizeof(jchar) == 2, "must be"); goetz@6458: assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate"); goetz@6458: goetz@6458: Label Ldone_false; goetz@6458: goetz@6458: if (cntval < 16) { // short case goetz@6458: if (cntval != 0) li(result_reg, 0); // assume false goetz@6458: goetz@6458: const int num_bytes = cntval*sizeof(jchar); goetz@6458: int index = 0; goetz@6458: for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) { goetz@6458: ld(tmp1_reg, index, str1_reg); goetz@6458: ld(tmp2_reg, index, str2_reg); goetz@6458: cmpd(CCR0, tmp1_reg, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); goetz@6458: } goetz@6458: if (cntval & 2) { goetz@6458: lwz(tmp1_reg, index, str1_reg); goetz@6458: lwz(tmp2_reg, index, str2_reg); goetz@6458: cmpw(CCR0, tmp1_reg, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); goetz@6458: index += 4; goetz@6458: } goetz@6458: if (cntval & 1) { goetz@6458: lhz(tmp1_reg, index, str1_reg); goetz@6458: lhz(tmp2_reg, index, str2_reg); goetz@6458: cmpw(CCR0, tmp1_reg, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); goetz@6458: } goetz@6458: // fallthrough: true goetz@6458: } else { goetz@6458: Label Lloop; goetz@6458: Register index_reg = tmp1_reg; goetz@6458: const int loopcnt = cntval/4; goetz@6458: assert(loopcnt > 0, "must be"); goetz@6458: // Offset 0 should be 32 byte aligned. goetz@6458: //2: goetz@6458: dcbtct(str1_reg, 0x00); // Indicate R/O access to str1. goetz@6458: dcbtct(str2_reg, 0x00); // Indicate R/O access to str2. goetz@6458: li(tmp2_reg, loopcnt); goetz@6458: li(index_reg, 0); // init goetz@6458: li(result_reg, 0); // assume false goetz@6458: mtctr(tmp2_reg); goetz@6458: //8: goetz@6458: bind(Lloop); goetz@6458: ldx(R0, str1_reg, index_reg); goetz@6458: ldx(tmp2_reg, str2_reg, index_reg); goetz@6458: cmpd(CCR0, R0, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); // Unequal char pair found -> done. 
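goetz@6458:     // Same dword-at-a-time scheme as in char_arrays_equals; the remaining
goetz@6458:     // 1-3 characters are handled by the word/halfword compares below.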
goetz@6458: addi(index_reg, index_reg, 4*sizeof(jchar)); goetz@6458: bdnz(Lloop); goetz@6458: //14: goetz@6458: if (cntval & 2) { goetz@6458: lwzx(R0, str1_reg, index_reg); goetz@6458: lwzx(tmp2_reg, str2_reg, index_reg); goetz@6458: cmpw(CCR0, R0, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); goetz@6458: if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar)); goetz@6458: } goetz@6458: if (cntval & 1) { goetz@6458: lhzx(R0, str1_reg, index_reg); goetz@6458: lhzx(tmp2_reg, str2_reg, index_reg); goetz@6458: cmpw(CCR0, R0, tmp2_reg); goetz@6458: bne(CCR0, Ldone_false); goetz@6458: } goetz@6458: // fallthru: true goetz@6458: } goetz@6458: li(result_reg, 1); goetz@6458: bind(Ldone_false); goetz@6458: } goetz@6458: goetz@6458: goetz@6458: void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) { goetz@6458: #ifdef ASSERT goetz@6458: Label ok; goetz@6458: if (check_equal) { goetz@6458: beq(CCR0, ok); goetz@6458: } else { goetz@6458: bne(CCR0, ok); goetz@6458: } goetz@6458: stop(msg, id); goetz@6458: bind(ok); goetz@6458: #endif goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset, goetz@6458: Register mem_base, const char* msg, int id) { goetz@6458: #ifdef ASSERT goetz@6458: switch (size) { goetz@6458: case 4: goetz@6458: lwz(R0, mem_offset, mem_base); goetz@6458: cmpwi(CCR0, R0, 0); goetz@6458: break; goetz@6458: case 8: goetz@6458: ld(R0, mem_offset, mem_base); goetz@6458: cmpdi(CCR0, R0, 0); goetz@6458: break; goetz@6458: default: goetz@6458: ShouldNotReachHere(); goetz@6458: } goetz@6458: asm_assert(check_equal, msg, id); goetz@6458: #endif // ASSERT goetz@6458: } goetz@6458: goetz@6458: void MacroAssembler::verify_thread() { goetz@6458: if (VerifyThread) { goetz@6458: unimplemented("'VerifyThread' currently not implemented on PPC"); goetz@6458: } goetz@6458: } goetz@6458: goetz@6458: // READ: oop. KILL: R0. Volatile floats perhaps. goetz@6458: void MacroAssembler::verify_oop(Register oop, const char* msg) { goetz@6458: if (!VerifyOops) { goetz@6458: return; goetz@6458: } goetz@6495: // Will be preserved. 
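goetz@6458: // R11 is stashed in R0 around the LR/CR save and restore below, so its value
goetz@6458: // survives this function even though it is used as scratch register.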
goetz@6458: Register tmp = R11; goetz@6458: assert(oop != tmp, "precondition"); goetz@6458: unsigned int nbytes_save = 10*8; // 10 volatile gprs goetz@6495: address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address(); goetz@6458: // save tmp goetz@6458: mr(R0, tmp); goetz@6458: // kill tmp goetz@6458: save_LR_CR(tmp); goetz@6511: push_frame_reg_args(nbytes_save, tmp); goetz@6458: // restore tmp goetz@6458: mr(tmp, R0); goetz@6458: save_volatile_gprs(R1_SP, 112); // except R0 goetz@6511: // load FunctionDescriptor** / entry_address * goetz@6458: load_const(tmp, fd); goetz@6511: // load FunctionDescriptor* / entry_address goetz@6458: ld(tmp, 0, tmp); goetz@6458: mr(R4_ARG2, oop); goetz@6458: load_const(R3_ARG1, (address)msg); goetz@6458: // call destination for its side effect goetz@6458: call_c(tmp); goetz@6458: restore_volatile_gprs(R1_SP, 112); // except R0 goetz@6458: pop_frame(); goetz@6458: // save tmp goetz@6458: mr(R0, tmp); goetz@6458: // kill tmp goetz@6458: restore_LR_CR(tmp); goetz@6458: // restore tmp goetz@6458: mr(tmp, R0); goetz@6458: } goetz@6458: goetz@6458: const char* stop_types[] = { goetz@6458: "stop", goetz@6458: "untested", goetz@6458: "unimplemented", goetz@6458: "shouldnotreachhere" goetz@6458: }; goetz@6458: goetz@6458: static void stop_on_request(int tp, const char* msg) { goetz@6458: tty->print("PPC assembly code requires stop: (%s) %s\n", (void *)stop_types[tp%/*stop_end*/4], msg); goetz@6458: guarantee(false, err_msg("PPC assembly code requires stop: %s", msg)); goetz@6458: } goetz@6458: goetz@6458: // Call a C-function that prints output. goetz@6458: void MacroAssembler::stop(int type, const char* msg, int id) { goetz@6458: #ifndef PRODUCT goetz@6458: block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg)); goetz@6458: #else goetz@6458: block_comment("stop {"); goetz@6458: #endif goetz@6458: goetz@6458: // setup arguments goetz@6458: load_const_optimized(R3_ARG1, type); goetz@6458: load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0); goetz@6458: call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2); goetz@6458: illtrap(); goetz@6458: emit_int32(id); goetz@6458: block_comment("} stop;"); goetz@6458: } goetz@6458: goetz@6458: #ifndef PRODUCT goetz@6458: // Write pattern 0x0101010101010101 in memory region [low-before, high+after]. goetz@6458: // Val, addr are temp registers. goetz@6458: // If low == addr, addr is killed. goetz@6458: // High is preserved. 
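goetz@6458: //
goetz@6458: // Roughly equivalent C sketch (illustration only, treating low/high as
goetz@6458: // julong* and before/after as word counts):
goetz@6458: //
goetz@6458: //   for (julong* p = low - before; p <= high + after; ++p) *p = 0x0101010101010101ULL;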
goetz@6458: void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) { goetz@6458: if (!ZapMemory) return; goetz@6458: goetz@6458: assert_different_registers(low, val); goetz@6458: goetz@6458: BLOCK_COMMENT("zap memory region {"); goetz@6458: load_const_optimized(val, 0x0101010101010101); goetz@6458: int size = before + after; goetz@6458: if (low == high && size < 5 && size > 0) { goetz@6458: int offset = -before*BytesPerWord; goetz@6458: for (int i = 0; i < size; ++i) { goetz@6458: std(val, offset, low); goetz@6458: offset += (1*BytesPerWord); goetz@6458: } goetz@6458: } else { goetz@6458: addi(addr, low, -before*BytesPerWord); goetz@6458: assert_different_registers(high, val); goetz@6458: if (after) addi(high, high, after * BytesPerWord); goetz@6458: Label loop; goetz@6458: bind(loop); goetz@6458: std(val, 0, addr); goetz@6458: addi(addr, addr, 8); goetz@6458: cmpd(CCR6, addr, high); goetz@6458: ble(CCR6, loop); goetz@6458: if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value. goetz@6458: } goetz@6458: BLOCK_COMMENT("} zap memory region"); goetz@6458: } goetz@6458: goetz@6458: #endif // !PRODUCT goetz@6512: goetz@6512: SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() { goetz@6512: int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true); goetz@6512: assert(sizeof(bool) == 1, "PowerPC ABI"); goetz@6512: masm->lbz(temp, simm16_offset, temp); goetz@6512: masm->cmpwi(CCR0, temp, 0); goetz@6512: masm->beq(CCR0, _label); goetz@6512: } goetz@6512: goetz@6512: SkipIfEqualZero::~SkipIfEqualZero() { goetz@6512: _masm->bind(_label); goetz@6512: }
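goetz@6512:
goetz@6512: // Usage sketch for SkipIfEqualZero (illustration only; 'masm', 'Rtemp' and
goetz@6512: // 'SomeBoolFlag' are placeholder names). The code emitted inside the scope is
goetz@6512: // branched over at runtime whenever *flag_addr reads as 0:
goetz@6512: //
goetz@6512: //   {
goetz@6512: //     SkipIfEqualZero skip(masm, Rtemp, &SomeBoolFlag);
goetz@6512: //     // ... code emitted here runs only if SomeBoolFlag is true ...
goetz@6512: //   } // The destructor binds the skip target label here.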