Thu, 20 Mar 2014 11:03:06 +0100
8037915: PPC64/AIX: Several smaller fixes
Reviewed-by: kvn
goetz@6458 | 1 | /* |
goetz@6458 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
goetz@6515 | 3 | * Copyright 2012, 2014 SAP AG. All rights reserved. |
goetz@6458 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
goetz@6458 | 5 | * |
goetz@6458 | 6 | * This code is free software; you can redistribute it and/or modify it |
goetz@6458 | 7 | * under the terms of the GNU General Public License version 2 only, as |
goetz@6458 | 8 | * published by the Free Software Foundation. |
goetz@6458 | 9 | * |
goetz@6458 | 10 | * This code is distributed in the hope that it will be useful, but WITHOUT |
goetz@6458 | 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
goetz@6458 | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
goetz@6458 | 13 | * version 2 for more details (a copy is included in the LICENSE file that |
goetz@6458 | 14 | * accompanied this code). |
goetz@6458 | 15 | * |
goetz@6458 | 16 | * You should have received a copy of the GNU General Public License version |
goetz@6458 | 17 | * 2 along with this work; if not, write to the Free Software Foundation, |
goetz@6458 | 18 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
goetz@6458 | 19 | * |
goetz@6458 | 20 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
goetz@6458 | 21 | * or visit www.oracle.com if you need additional information or have any |
goetz@6458 | 22 | * questions. |
goetz@6458 | 23 | * |
goetz@6458 | 24 | */ |
goetz@6458 | 25 | |
goetz@6458 | 26 | #include "precompiled.hpp" |
goetz@6458 | 27 | #include "asm/macroAssembler.inline.hpp" |
goetz@6458 | 28 | #include "compiler/disassembler.hpp" |
goetz@6458 | 29 | #include "gc_interface/collectedHeap.inline.hpp" |
goetz@6458 | 30 | #include "interpreter/interpreter.hpp" |
goetz@6458 | 31 | #include "memory/cardTableModRefBS.hpp" |
goetz@6458 | 32 | #include "memory/resourceArea.hpp" |
goetz@6458 | 33 | #include "prims/methodHandles.hpp" |
goetz@6458 | 34 | #include "runtime/biasedLocking.hpp" |
goetz@6458 | 35 | #include "runtime/interfaceSupport.hpp" |
goetz@6458 | 36 | #include "runtime/objectMonitor.hpp" |
goetz@6458 | 37 | #include "runtime/os.hpp" |
goetz@6458 | 38 | #include "runtime/sharedRuntime.hpp" |
goetz@6458 | 39 | #include "runtime/stubRoutines.hpp" |
goetz@6458 | 40 | #include "utilities/macros.hpp" |
goetz@6458 | 41 | #if INCLUDE_ALL_GCS |
goetz@6458 | 42 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
goetz@6458 | 43 | #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
goetz@6458 | 44 | #include "gc_implementation/g1/heapRegion.hpp" |
goetz@6458 | 45 | #endif // INCLUDE_ALL_GCS |
goetz@6458 | 46 | |
goetz@6458 | 47 | #ifdef PRODUCT |
goetz@6458 | 48 | #define BLOCK_COMMENT(str) // nothing |
goetz@6458 | 49 | #else |
goetz@6458 | 50 | #define BLOCK_COMMENT(str) block_comment(str) |
goetz@6458 | 51 | #endif |
goetz@6458 | 52 | |
goetz@6458 | 53 | #ifdef ASSERT |
goetz@6458 | 54 | // On RISC, there's no benefit to verifying instruction boundaries. |
goetz@6458 | 55 | bool AbstractAssembler::pd_check_instruction_mark() { return false; } |
goetz@6458 | 56 | #endif |
goetz@6458 | 57 | |
goetz@6458 | 58 | void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) { |
goetz@6458 | 59 | assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range"); |
goetz@6458 | 60 | if (Assembler::is_simm(si31, 16)) { |
goetz@6458 | 61 | ld(d, si31, a); |
goetz@6458 | 62 | if (emit_filler_nop) nop(); |
goetz@6458 | 63 | } else { |
goetz@6458 | 64 | const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31); |
goetz@6458 | 65 | const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31); |
goetz@6458 | 66 | addis(d, a, hi); |
goetz@6458 | 67 | ld(d, lo, d); |
goetz@6458 | 68 | } |
goetz@6458 | 69 | } |
goetz@6458 | 70 | |
goetz@6458 | 71 | void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) { |
goetz@6458 | 72 | assert_different_registers(d, a); |
goetz@6458 | 73 | ld_largeoffset_unchecked(d, si31, a, emit_filler_nop); |
goetz@6458 | 74 | } |
goetz@6458 | 75 | |
goetz@6458 | 76 | void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base, |
goetz@6458 | 77 | size_t size_in_bytes, bool is_signed) { |
goetz@6458 | 78 | switch (size_in_bytes) { |
goetz@6458 | 79 | case 8: ld(dst, offs, base); break; |
goetz@6458 | 80 | case 4: is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break; |
goetz@6458 | 81 | case 2: is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break; |
goetz@6458 | 82 | case 1: lbz(dst, offs, base); if (is_signed) extsb(dst, dst); break; // lba doesn't exist :( |
goetz@6458 | 83 | default: ShouldNotReachHere(); |
goetz@6458 | 84 | } |
goetz@6458 | 85 | } |
goetz@6458 | 86 | |
goetz@6458 | 87 | void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base, |
goetz@6458 | 88 | size_t size_in_bytes) { |
goetz@6458 | 89 | switch (size_in_bytes) { |
goetz@6458 | 90 | case 8: std(dst, offs, base); break; |
goetz@6458 | 91 | case 4: stw(dst, offs, base); break; |
goetz@6458 | 92 | case 2: sth(dst, offs, base); break; |
goetz@6458 | 93 | case 1: stb(dst, offs, base); break; |
goetz@6458 | 94 | default: ShouldNotReachHere(); |
goetz@6458 | 95 | } |
goetz@6458 | 96 | } |
goetz@6458 | 97 | |
goetz@6495 | 98 | void MacroAssembler::align(int modulus, int max, int rem) { |
goetz@6495 | 99 | int padding = (rem + modulus - (offset() % modulus)) % modulus; |
goetz@6495 | 100 | if (padding > max) return; |
goetz@6495 | 101 | for (int c = (padding >> 2); c > 0; --c) { nop(); } |
goetz@6458 | 102 | } |
goetz@6458 | 103 | |
goetz@6458 | 104 | // Issue instructions that calculate given TOC from global TOC. |
goetz@6458 | 105 | void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16, |
goetz@6458 | 106 | bool add_relocation, bool emit_dummy_addr) { |
goetz@6458 | 107 | int offset = -1; |
goetz@6458 | 108 | if (emit_dummy_addr) { |
goetz@6458 | 109 | offset = -128; // dummy address |
goetz@6458 | 110 | } else if (addr != (address)(intptr_t)-1) { |
goetz@6458 | 111 | offset = MacroAssembler::offset_to_global_toc(addr); |
goetz@6458 | 112 | } |
goetz@6458 | 113 | |
goetz@6458 | 114 | if (hi16) { |
goetz@6458 | 115 | addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset)); |
goetz@6458 | 116 | } |
goetz@6458 | 117 | if (lo16) { |
goetz@6458 | 118 | if (add_relocation) { |
goetz@6458 | 119 | // Relocate at the addi to avoid confusion with a load from the method's TOC. |
goetz@6458 | 120 | relocate(internal_word_Relocation::spec(addr)); |
goetz@6458 | 121 | } |
goetz@6458 | 122 | addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset)); |
goetz@6458 | 123 | } |
goetz@6458 | 124 | } |
goetz@6458 | 125 | |
goetz@6458 | 126 | int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) { |
goetz@6458 | 127 | const int offset = MacroAssembler::offset_to_global_toc(addr); |
goetz@6458 | 128 | |
goetz@6458 | 129 | const address inst2_addr = a; |
goetz@6458 | 130 | const int inst2 = *(int *)inst2_addr; |
goetz@6458 | 131 | |
goetz@6458 | 132 | // The relocation points to the second instruction, the addi, |
goetz@6458 | 133 | // and the addi reads and writes the same register dst. |
goetz@6458 | 134 | const int dst = inv_rt_field(inst2); |
goetz@6458 | 135 | assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst"); |
goetz@6458 | 136 | |
goetz@6458 | 137 | // Now, find the preceding addis which writes to dst. |
goetz@6458 | 138 | int inst1 = 0; |
goetz@6458 | 139 | address inst1_addr = inst2_addr - BytesPerInstWord; |
goetz@6458 | 140 | while (inst1_addr >= bound) { |
goetz@6458 | 141 | inst1 = *(int *) inst1_addr; |
goetz@6458 | 142 | if (is_addis(inst1) && inv_rt_field(inst1) == dst) { |
goetz@6458 | 143 | // Stop, found the addis which writes dst. |
goetz@6458 | 144 | break; |
goetz@6458 | 145 | } |
goetz@6458 | 146 | inst1_addr -= BytesPerInstWord; |
goetz@6458 | 147 | } |
goetz@6458 | 148 | |
goetz@6458 | 149 | assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC"); |
goetz@6458 | 150 | set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset)); |
goetz@6458 | 151 | set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset)); |
goetz@6458 | 152 | return (int)((intptr_t)addr - (intptr_t)inst1_addr); |
goetz@6458 | 153 | } |
goetz@6458 | 154 | |
goetz@6458 | 155 | address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) { |
goetz@6458 | 156 | const address inst2_addr = a; |
goetz@6458 | 157 | const int inst2 = *(int *)inst2_addr; |
goetz@6458 | 158 | |
goetz@6458 | 159 | // The relocation points to the second instruction, the addi, |
goetz@6458 | 160 | // and the addi reads and writes the same register dst. |
goetz@6458 | 161 | const int dst = inv_rt_field(inst2); |
goetz@6458 | 162 | assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst"); |
goetz@6458 | 163 | |
goetz@6458 | 164 | // Now, find the preceding addis which writes to dst. |
goetz@6458 | 165 | int inst1 = 0; |
goetz@6458 | 166 | address inst1_addr = inst2_addr - BytesPerInstWord; |
goetz@6458 | 167 | while (inst1_addr >= bound) { |
goetz@6458 | 168 | inst1 = *(int *) inst1_addr; |
goetz@6458 | 169 | if (is_addis(inst1) && inv_rt_field(inst1) == dst) { |
goetz@6458 | 170 | // stop, found the addis which writes dst |
goetz@6458 | 171 | break; |
goetz@6458 | 172 | } |
goetz@6458 | 173 | inst1_addr -= BytesPerInstWord; |
goetz@6458 | 174 | } |
goetz@6458 | 175 | |
goetz@6458 | 176 | assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC"); |
goetz@6458 | 177 | |
goetz@6458 | 178 | int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0); |
goetz@6458 | 179 | // -1 is a special case |
goetz@6458 | 180 | if (offset == -1) { |
goetz@6458 | 181 | return (address)(intptr_t)-1; |
goetz@6458 | 182 | } else { |
goetz@6458 | 183 | return global_toc() + offset; |
goetz@6458 | 184 | } |
goetz@6458 | 185 | } |
goetz@6458 | 186 | |
goetz@6458 | 187 | #ifdef _LP64 |
goetz@6458 | 188 | // Patch compressed oops or klass constants. |
goetz@6495 | 189 | // Assembler sequence is |
goetz@6495 | 190 | // 1) compressed oops: |
goetz@6495 | 191 | // lis rx = const.hi |
goetz@6495 | 192 | // ori rx = rx | const.lo |
goetz@6495 | 193 | // 2) compressed klass: |
goetz@6495 | 194 | // lis rx = const.hi |
goetz@6495 | 195 | // clrldi rx = rx & 0xFFFFffff // clearMS32b, optional |
goetz@6495 | 196 | // ori rx = rx | const.lo |
goetz@6495 | 197 | // Clrldi will be passed by. |
goetz@6458 | 198 | int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) { |
goetz@6458 | 199 | assert(UseCompressedOops, "Should only patch compressed oops"); |
goetz@6458 | 200 | |
goetz@6458 | 201 | const address inst2_addr = a; |
goetz@6458 | 202 | const int inst2 = *(int *)inst2_addr; |
goetz@6458 | 203 | |
goetz@6495 | 204 | // The relocation points to the second instruction, the ori, |
goetz@6495 | 205 | // and the ori reads and writes the same register dst. |
goetz@6495 | 206 | const int dst = inv_rta_field(inst2); |
goetz@6501 | 207 | assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst"); |
goetz@6458 | 208 | // Now, find the preceding addis which writes to dst. |
goetz@6458 | 209 | int inst1 = 0; |
goetz@6458 | 210 | address inst1_addr = inst2_addr - BytesPerInstWord; |
goetz@6458 | 211 | bool inst1_found = false; |
goetz@6458 | 212 | while (inst1_addr >= bound) { |
goetz@6458 | 213 | inst1 = *(int *)inst1_addr; |
goetz@6458 | 214 | if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; } |
goetz@6458 | 215 | inst1_addr -= BytesPerInstWord; |
goetz@6458 | 216 | } |
goetz@6458 | 217 | assert(inst1_found, "inst is not lis"); |
goetz@6458 | 218 | |
goetz@6458 | 219 | int xc = (data >> 16) & 0xffff; |
goetz@6458 | 220 | int xd = (data >> 0) & 0xffff; |
goetz@6458 | 221 | |
goetz@6495 | 222 | set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo |
goetz@6501 | 223 | set_imm((int *)inst2_addr, (xd)); // unsigned int |
goetz@6458 | 224 | return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr); |
goetz@6458 | 225 | } |
goetz@6458 | 226 | |
goetz@6458 | 227 | // Get compressed oop or klass constant. |
goetz@6458 | 228 | narrowOop MacroAssembler::get_narrow_oop(address a, address bound) { |
goetz@6458 | 229 | assert(UseCompressedOops, "Should only patch compressed oops"); |
goetz@6458 | 230 | |
goetz@6458 | 231 | const address inst2_addr = a; |
goetz@6458 | 232 | const int inst2 = *(int *)inst2_addr; |
goetz@6458 | 233 | |
goetz@6495 | 234 | // The relocation points to the second instruction, the ori, |
goetz@6495 | 235 | // and the ori reads and writes the same register dst. |
goetz@6495 | 236 | const int dst = inv_rta_field(inst2); |
goetz@6501 | 237 | assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst"); |
goetz@6458 | 238 | // Now, find the preceding lis which writes to dst. |
goetz@6458 | 239 | int inst1 = 0; |
goetz@6458 | 240 | address inst1_addr = inst2_addr - BytesPerInstWord; |
goetz@6458 | 241 | bool inst1_found = false; |
goetz@6458 | 242 | |
goetz@6458 | 243 | while (inst1_addr >= bound) { |
goetz@6458 | 244 | inst1 = *(int *) inst1_addr; |
goetz@6458 | 245 | if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;} |
goetz@6458 | 246 | inst1_addr -= BytesPerInstWord; |
goetz@6458 | 247 | } |
goetz@6458 | 248 | assert(inst1_found, "inst is not lis"); |
goetz@6458 | 249 | |
goetz@6495 | 250 | uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff)); |
goetz@6495 | 251 | uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16); |
goetz@6495 | 252 | |
goetz@6458 | 253 | return (int) (xl | xh); |
goetz@6458 | 254 | } |
goetz@6458 | 255 | #endif // _LP64 |
goetz@6458 | 256 | |
goetz@6458 | 257 | void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) { |
goetz@6458 | 258 | int toc_offset = 0; |
goetz@6458 | 259 | // Use RelocationHolder::none for the constant pool entry, otherwise |
goetz@6458 | 260 | // we will end up with a failing NativeCall::verify(x) where x is |
goetz@6458 | 261 | // the address of the constant pool entry. |
goetz@6458 | 262 | // FIXME: We should insert relocation information for oops at the constant |
goetz@6458 | 263 | // pool entries instead of inserting it at the loads; patching of a constant |
goetz@6458 | 264 | // pool entry should be less expensive. |
goetz@6495 | 265 | address oop_address = address_constant((address)a.value(), RelocationHolder::none); |
goetz@6495 | 266 | // Relocate at the pc of the load. |
goetz@6495 | 267 | relocate(a.rspec()); |
goetz@6495 | 268 | toc_offset = (int)(oop_address - code()->consts()->start()); |
goetz@6458 | 269 | ld_largeoffset_unchecked(dst, toc_offset, toc, true); |
goetz@6458 | 270 | } |
goetz@6458 | 271 | |
goetz@6458 | 272 | bool MacroAssembler::is_load_const_from_method_toc_at(address a) { |
goetz@6458 | 273 | const address inst1_addr = a; |
goetz@6458 | 274 | const int inst1 = *(int *)inst1_addr; |
goetz@6458 | 275 | |
goetz@6458 | 276 | // The relocation points to the ld or the addis. |
goetz@6458 | 277 | return (is_ld(inst1)) || |
goetz@6458 | 278 | (is_addis(inst1) && inv_ra_field(inst1) != 0); |
goetz@6458 | 279 | } |
goetz@6458 | 280 | |
goetz@6458 | 281 | int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) { |
goetz@6458 | 282 | assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc"); |
goetz@6458 | 283 | |
goetz@6458 | 284 | const address inst1_addr = a; |
goetz@6458 | 285 | const int inst1 = *(int *)inst1_addr; |
goetz@6458 | 286 | |
goetz@6458 | 287 | if (is_ld(inst1)) { |
goetz@6458 | 288 | return inv_d1_field(inst1); |
goetz@6458 | 289 | } else if (is_addis(inst1)) { |
goetz@6458 | 290 | const int dst = inv_rt_field(inst1); |
goetz@6458 | 291 | |
goetz@6458 | 292 | // Now, find the succeeding ld which reads and writes to dst. |
goetz@6458 | 293 | address inst2_addr = inst1_addr + BytesPerInstWord; |
goetz@6458 | 294 | int inst2 = 0; |
goetz@6458 | 295 | while (true) { |
goetz@6458 | 296 | inst2 = *(int *) inst2_addr; |
goetz@6458 | 297 | if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) { |
goetz@6458 | 298 | // Stop, found the ld which reads and writes dst. |
goetz@6458 | 299 | break; |
goetz@6458 | 300 | } |
goetz@6458 | 301 | inst2_addr += BytesPerInstWord; |
goetz@6458 | 302 | } |
goetz@6458 | 303 | return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2); |
goetz@6458 | 304 | } |
goetz@6458 | 305 | ShouldNotReachHere(); |
goetz@6458 | 306 | return 0; |
goetz@6458 | 307 | } |
goetz@6458 | 308 | |
goetz@6458 | 309 | // Get the constant from a `load_const' sequence. |
goetz@6458 | 310 | long MacroAssembler::get_const(address a) { |
goetz@6458 | 311 | assert(is_load_const_at(a), "not a load of a constant"); |
goetz@6458 | 312 | const int *p = (const int*) a; |
goetz@6458 | 313 | unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48); |
goetz@6458 | 314 | if (is_ori(*(p+1))) { |
goetz@6458 | 315 | x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32); |
goetz@6458 | 316 | x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16); |
goetz@6458 | 317 | x |= (((unsigned long) (get_imm(a,4) & 0xffff))); |
goetz@6458 | 318 | } else if (is_lis(*(p+1))) { |
goetz@6458 | 319 | x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32); |
goetz@6458 | 320 | x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16); |
goetz@6458 | 321 | x |= (((unsigned long) (get_imm(a,3) & 0xffff))); |
goetz@6458 | 322 | } else { |
goetz@6458 | 323 | ShouldNotReachHere(); |
goetz@6458 | 324 | return (long) 0; |
goetz@6458 | 325 | } |
goetz@6458 | 326 | return (long) x; |
goetz@6458 | 327 | } |
goetz@6458 | 328 | |
goetz@6458 | 329 | // Patch the 64 bit constant of a `load_const' sequence. This is a low |
goetz@6458 | 330 | // level procedure. It neither flushes the instruction cache nor is it |
goetz@6458 | 331 | // mt safe. |
goetz@6458 | 332 | void MacroAssembler::patch_const(address a, long x) { |
goetz@6458 | 333 | assert(is_load_const_at(a), "not a load of a constant"); |
goetz@6458 | 334 | int *p = (int*) a; |
goetz@6458 | 335 | if (is_ori(*(p+1))) { |
goetz@6458 | 336 | set_imm(0 + p, (x >> 48) & 0xffff); |
goetz@6458 | 337 | set_imm(1 + p, (x >> 32) & 0xffff); |
goetz@6458 | 338 | set_imm(3 + p, (x >> 16) & 0xffff); |
goetz@6458 | 339 | set_imm(4 + p, x & 0xffff); |
goetz@6458 | 340 | } else if (is_lis(*(p+1))) { |
goetz@6458 | 341 | set_imm(0 + p, (x >> 48) & 0xffff); |
goetz@6458 | 342 | set_imm(2 + p, (x >> 32) & 0xffff); |
goetz@6458 | 343 | set_imm(1 + p, (x >> 16) & 0xffff); |
goetz@6458 | 344 | set_imm(3 + p, x & 0xffff); |
goetz@6458 | 345 | } else { |
goetz@6458 | 346 | ShouldNotReachHere(); |
goetz@6458 | 347 | } |
goetz@6458 | 348 | } |
goetz@6458 | 349 | |
goetz@6458 | 350 | AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { |
goetz@6458 | 351 | assert(oop_recorder() != NULL, "this assembler needs a Recorder"); |
goetz@6458 | 352 | int index = oop_recorder()->allocate_metadata_index(obj); |
goetz@6458 | 353 | RelocationHolder rspec = metadata_Relocation::spec(index); |
goetz@6458 | 354 | return AddressLiteral((address)obj, rspec); |
goetz@6458 | 355 | } |
goetz@6458 | 356 | |
goetz@6458 | 357 | AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { |
goetz@6458 | 358 | assert(oop_recorder() != NULL, "this assembler needs a Recorder"); |
goetz@6458 | 359 | int index = oop_recorder()->find_index(obj); |
goetz@6458 | 360 | RelocationHolder rspec = metadata_Relocation::spec(index); |
goetz@6458 | 361 | return AddressLiteral((address)obj, rspec); |
goetz@6458 | 362 | } |
goetz@6458 | 363 | |
goetz@6458 | 364 | AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { |
goetz@6458 | 365 | assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); |
goetz@6458 | 366 | int oop_index = oop_recorder()->allocate_oop_index(obj); |
goetz@6458 | 367 | return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); |
goetz@6458 | 368 | } |
goetz@6458 | 369 | |
goetz@6458 | 370 | AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { |
goetz@6458 | 371 | assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); |
goetz@6458 | 372 | int oop_index = oop_recorder()->find_index(obj); |
goetz@6458 | 373 | return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); |
goetz@6458 | 374 | } |
goetz@6458 | 375 | |
goetz@6458 | 376 | RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, |
goetz@6458 | 377 | Register tmp, int offset) { |
goetz@6458 | 378 | intptr_t value = *delayed_value_addr; |
goetz@6458 | 379 | if (value != 0) { |
goetz@6458 | 380 | return RegisterOrConstant(value + offset); |
goetz@6458 | 381 | } |
goetz@6458 | 382 | |
goetz@6458 | 383 | // Load indirectly to solve generation ordering problem. |
goetz@6458 | 384 | // static address, no relocation |
goetz@6458 | 385 | int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true); |
goetz@6458 | 386 | ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0) |
goetz@6458 | 387 | |
goetz@6458 | 388 | if (offset != 0) { |
goetz@6458 | 389 | addi(tmp, tmp, offset); |
goetz@6458 | 390 | } |
goetz@6458 | 391 | |
goetz@6458 | 392 | return RegisterOrConstant(tmp); |
goetz@6458 | 393 | } |
goetz@6458 | 394 | |
goetz@6458 | 395 | #ifndef PRODUCT |
goetz@6458 | 396 | void MacroAssembler::pd_print_patched_instruction(address branch) { |
goetz@6458 | 397 | Unimplemented(); // TODO: PPC port |
goetz@6458 | 398 | } |
goetz@6458 | 399 | #endif // ndef PRODUCT |
goetz@6458 | 400 | |
goetz@6458 | 401 | // Conditional far branch for destinations encodable in 24+2 bits. |
goetz@6458 | 402 | void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) { |
goetz@6458 | 403 | |
goetz@6458 | 404 | // If requested by flag optimize, relocate the bc_far as a |
goetz@6458 | 405 | // runtime_call and prepare for optimizing it when the code gets |
goetz@6458 | 406 | // relocated. |
goetz@6458 | 407 | if (optimize == bc_far_optimize_on_relocate) { |
goetz@6458 | 408 | relocate(relocInfo::runtime_call_type); |
goetz@6458 | 409 | } |
goetz@6458 | 410 | |
goetz@6458 | 411 | // variant 2: |
goetz@6458 | 412 | // |
goetz@6458 | 413 | // b!cxx SKIP |
goetz@6458 | 414 | // bxx DEST |
goetz@6458 | 415 | // SKIP: |
goetz@6458 | 416 | // |
goetz@6458 | 417 | |
goetz@6458 | 418 | const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)), |
goetz@6458 | 419 | opposite_bcond(inv_boint_bcond(boint))); |
goetz@6458 | 420 | |
goetz@6458 | 421 | // We emit two branches. |
goetz@6458 | 422 | // First, a conditional branch which jumps around the far branch. |
goetz@6458 | 423 | const address not_taken_pc = pc() + 2 * BytesPerInstWord; |
goetz@6458 | 424 | const address bc_pc = pc(); |
goetz@6458 | 425 | bc(opposite_boint, biint, not_taken_pc); |
goetz@6458 | 426 | |
goetz@6458 | 427 | const int bc_instr = *(int*)bc_pc; |
goetz@6458 | 428 | assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition"); |
goetz@6458 | 429 | assert(opposite_boint == inv_bo_field(bc_instr), "postcondition"); |
goetz@6458 | 430 | assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))), |
goetz@6458 | 431 | opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))), |
goetz@6458 | 432 | "postcondition"); |
goetz@6458 | 433 | assert(biint == inv_bi_field(bc_instr), "postcondition"); |
goetz@6458 | 434 | |
goetz@6458 | 435 | // Second, an unconditional far branch which jumps to dest. |
goetz@6458 | 436 | // Note: target(dest) remembers the current pc (see CodeSection::target) |
goetz@6458 | 437 | // and returns the current pc if the label is not bound yet; when |
goetz@6458 | 438 | // the label gets bound, the unconditional far branch will be patched. |
goetz@6458 | 439 | const address target_pc = target(dest); |
goetz@6458 | 440 | const address b_pc = pc(); |
goetz@6458 | 441 | b(target_pc); |
goetz@6458 | 442 | |
goetz@6458 | 443 | assert(not_taken_pc == pc(), "postcondition"); |
goetz@6458 | 444 | assert(dest.is_bound() || target_pc == b_pc, "postcondition"); |
goetz@6458 | 445 | } |
goetz@6458 | 446 | |
goetz@6458 | 447 | bool MacroAssembler::is_bc_far_at(address instruction_addr) { |
goetz@6458 | 448 | return is_bc_far_variant1_at(instruction_addr) || |
goetz@6458 | 449 | is_bc_far_variant2_at(instruction_addr) || |
goetz@6458 | 450 | is_bc_far_variant3_at(instruction_addr); |
goetz@6458 | 451 | } |
goetz@6458 | 452 | |
goetz@6458 | 453 | address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) { |
goetz@6458 | 454 | if (is_bc_far_variant1_at(instruction_addr)) { |
goetz@6458 | 455 | const address instruction_1_addr = instruction_addr; |
goetz@6458 | 456 | const int instruction_1 = *(int*)instruction_1_addr; |
goetz@6458 | 457 | return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr); |
goetz@6458 | 458 | } else if (is_bc_far_variant2_at(instruction_addr)) { |
goetz@6458 | 459 | const address instruction_2_addr = instruction_addr + 4; |
goetz@6458 | 460 | return bxx_destination(instruction_2_addr); |
goetz@6458 | 461 | } else if (is_bc_far_variant3_at(instruction_addr)) { |
goetz@6458 | 462 | return instruction_addr + 8; |
goetz@6458 | 463 | } |
goetz@6458 | 464 | // variant 4 ??? |
goetz@6458 | 465 | ShouldNotReachHere(); |
goetz@6458 | 466 | return NULL; |
goetz@6458 | 467 | } |
goetz@6458 | 468 | void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) { |
goetz@6458 | 469 | |
goetz@6458 | 470 | if (is_bc_far_variant3_at(instruction_addr)) { |
goetz@6458 | 471 | // variant 3, far cond branch to the next instruction, already patched to nops: |
goetz@6458 | 472 | // |
goetz@6458 | 473 | // nop |
goetz@6458 | 474 | // endgroup |
goetz@6458 | 475 | // SKIP/DEST: |
goetz@6458 | 476 | // |
goetz@6458 | 477 | return; |
goetz@6458 | 478 | } |
goetz@6458 | 479 | |
goetz@6458 | 480 | // first, extract boint and biint from the current branch |
goetz@6458 | 481 | int boint = 0; |
goetz@6458 | 482 | int biint = 0; |
goetz@6458 | 483 | |
goetz@6458 | 484 | ResourceMark rm; |
goetz@6458 | 485 | const int code_size = 2 * BytesPerInstWord; |
goetz@6458 | 486 | CodeBuffer buf(instruction_addr, code_size); |
goetz@6458 | 487 | MacroAssembler masm(&buf); |
goetz@6458 | 488 | if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) { |
goetz@6458 | 489 | // Far branch to next instruction: Optimize it by patching nops (produce variant 3). |
goetz@6458 | 490 | masm.nop(); |
goetz@6458 | 491 | masm.endgroup(); |
goetz@6458 | 492 | } else { |
goetz@6458 | 493 | if (is_bc_far_variant1_at(instruction_addr)) { |
goetz@6458 | 494 | // variant 1, the 1st instruction contains the destination address: |
goetz@6458 | 495 | // |
goetz@6458 | 496 | // bcxx DEST |
goetz@6458 | 497 | // endgroup |
goetz@6458 | 498 | // |
goetz@6458 | 499 | const int instruction_1 = *(int*)(instruction_addr); |
goetz@6458 | 500 | boint = inv_bo_field(instruction_1); |
goetz@6458 | 501 | biint = inv_bi_field(instruction_1); |
goetz@6458 | 502 | } else if (is_bc_far_variant2_at(instruction_addr)) { |
goetz@6458 | 503 | // variant 2, the 2nd instruction contains the destination address: |
goetz@6458 | 504 | // |
goetz@6458 | 505 | // b!cxx SKIP |
goetz@6458 | 506 | // bxx DEST |
goetz@6458 | 507 | // SKIP: |
goetz@6458 | 508 | // |
goetz@6458 | 509 | const int instruction_1 = *(int*)(instruction_addr); |
goetz@6458 | 510 | boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))), |
goetz@6458 | 511 | opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1)))); |
goetz@6458 | 512 | biint = inv_bi_field(instruction_1); |
goetz@6458 | 513 | } else { |
goetz@6458 | 514 | // variant 4??? |
goetz@6458 | 515 | ShouldNotReachHere(); |
goetz@6458 | 516 | } |
goetz@6458 | 517 | |
goetz@6458 | 518 | // second, set the new branch destination and optimize the code |
goetz@6458 | 519 | if (dest != instruction_addr + 4 && // the bc_far is still unbound! |
goetz@6458 | 520 | masm.is_within_range_of_bcxx(dest, instruction_addr)) { |
goetz@6458 | 521 | // variant 1: |
goetz@6458 | 522 | // |
goetz@6458 | 523 | // bcxx DEST |
goetz@6458 | 524 | // endgroup |
goetz@6458 | 525 | // |
goetz@6458 | 526 | masm.bc(boint, biint, dest); |
goetz@6458 | 527 | masm.endgroup(); |
goetz@6458 | 528 | } else { |
goetz@6458 | 529 | // variant 2: |
goetz@6458 | 530 | // |
goetz@6458 | 531 | // b!cxx SKIP |
goetz@6458 | 532 | // bxx DEST |
goetz@6458 | 533 | // SKIP: |
goetz@6458 | 534 | // |
goetz@6458 | 535 | const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)), |
goetz@6458 | 536 | opposite_bcond(inv_boint_bcond(boint))); |
goetz@6458 | 537 | const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord; |
goetz@6458 | 538 | masm.bc(opposite_boint, biint, not_taken_pc); |
goetz@6458 | 539 | masm.b(dest); |
goetz@6458 | 540 | } |
goetz@6458 | 541 | } |
goetz@6495 | 542 | ICache::ppc64_flush_icache_bytes(instruction_addr, code_size); |
goetz@6458 | 543 | } |
goetz@6458 | 544 | |
goetz@6458 | 545 | // Emit a NOT mt-safe patchable 64 bit absolute call/jump. |
goetz@6458 | 546 | void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) { |
goetz@6458 | 547 | // get current pc |
goetz@6458 | 548 | uint64_t start_pc = (uint64_t) pc(); |
goetz@6458 | 549 | |
goetz@6458 | 550 | const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last |
goetz@6458 | 551 | const address pc_of_b = (address) (start_pc + (0*BytesPerInstWord)); // b is first |
goetz@6458 | 552 | |
goetz@6458 | 553 | // relocate here |
goetz@6458 | 554 | if (rt != relocInfo::none) { |
goetz@6458 | 555 | relocate(rt); |
goetz@6458 | 556 | } |
goetz@6458 | 557 | |
goetz@6458 | 558 | if ( ReoptimizeCallSequences && |
goetz@6458 | 559 | (( link && is_within_range_of_b(dest, pc_of_bl)) || |
goetz@6458 | 560 | (!link && is_within_range_of_b(dest, pc_of_b)))) { |
goetz@6458 | 561 | // variant 2: |
goetz@6458 | 562 | // Emit an optimized, pc-relative call/jump. |
goetz@6458 | 563 | |
goetz@6458 | 564 | if (link) { |
goetz@6458 | 565 | // some padding |
goetz@6458 | 566 | nop(); |
goetz@6458 | 567 | nop(); |
goetz@6458 | 568 | nop(); |
goetz@6458 | 569 | nop(); |
goetz@6458 | 570 | nop(); |
goetz@6458 | 571 | nop(); |
goetz@6458 | 572 | |
goetz@6458 | 573 | // do the call |
goetz@6458 | 574 | assert(pc() == pc_of_bl, "just checking"); |
goetz@6458 | 575 | bl(dest, relocInfo::none); |
goetz@6458 | 576 | } else { |
goetz@6458 | 577 | // do the jump |
goetz@6458 | 578 | assert(pc() == pc_of_b, "just checking"); |
goetz@6458 | 579 | b(dest, relocInfo::none); |
goetz@6458 | 580 | |
goetz@6458 | 581 | // some padding |
goetz@6458 | 582 | nop(); |
goetz@6458 | 583 | nop(); |
goetz@6458 | 584 | nop(); |
goetz@6458 | 585 | nop(); |
goetz@6458 | 586 | nop(); |
goetz@6458 | 587 | nop(); |
goetz@6458 | 588 | } |
goetz@6458 | 589 | |
goetz@6458 | 590 | // Assert that we can identify the emitted call/jump. |
goetz@6458 | 591 | assert(is_bxx64_patchable_variant2_at((address)start_pc, link), |
goetz@6458 | 592 | "can't identify emitted call"); |
goetz@6458 | 593 | } else { |
goetz@6458 | 594 | // variant 1: |
goetz@6511 | 595 | #if defined(ABI_ELFv2) |
goetz@6511 | 596 | nop(); |
goetz@6511 | 597 | calculate_address_from_global_toc(R12, dest, true, true, false); |
goetz@6511 | 598 | mtctr(R12); |
goetz@6511 | 599 | nop(); |
goetz@6511 | 600 | nop(); |
goetz@6511 | 601 | #else |
goetz@6458 | 602 | mr(R0, R11); // spill R11 -> R0. |
goetz@6458 | 603 | |
goetz@6458 | 604 | // Load the destination address into CTR, |
goetz@6458 | 605 | // calculate destination relative to global toc. |
goetz@6458 | 606 | calculate_address_from_global_toc(R11, dest, true, true, false); |
goetz@6458 | 607 | |
goetz@6458 | 608 | mtctr(R11); |
goetz@6458 | 609 | mr(R11, R0); // spill R11 <- R0. |
goetz@6458 | 610 | nop(); |
goetz@6511 | 611 | #endif |
goetz@6458 | 612 | |
goetz@6458 | 613 | // do the call/jump |
goetz@6458 | 614 | if (link) { |
goetz@6458 | 615 | bctrl(); |
goetz@6458 | 616 | } else{ |
goetz@6458 | 617 | bctr(); |
goetz@6458 | 618 | } |
goetz@6458 | 619 | // Assert that we can identify the emitted call/jump. |
goetz@6458 | 620 | assert(is_bxx64_patchable_variant1b_at((address)start_pc, link), |
goetz@6458 | 621 | "can't identify emitted call"); |
goetz@6458 | 622 | } |
goetz@6458 | 623 | |
goetz@6458 | 624 | // Assert that we can identify the emitted call/jump. |
goetz@6458 | 625 | assert(is_bxx64_patchable_at((address)start_pc, link), |
goetz@6458 | 626 | "can't identify emitted call"); |
goetz@6458 | 627 | assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest, |
goetz@6458 | 628 | "wrong encoding of dest address"); |
goetz@6458 | 629 | } |
goetz@6458 | 630 | |
goetz@6458 | 631 | // Identify a bxx64_patchable instruction. |
goetz@6458 | 632 | bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) { |
goetz@6458 | 633 | return is_bxx64_patchable_variant1b_at(instruction_addr, link) |
goetz@6458 | 634 | //|| is_bxx64_patchable_variant1_at(instruction_addr, link) |
goetz@6458 | 635 | || is_bxx64_patchable_variant2_at(instruction_addr, link); |
goetz@6458 | 636 | } |
goetz@6458 | 637 | |
goetz@6458 | 638 | // Does the call64_patchable instruction use a pc-relative encoding of |
goetz@6458 | 639 | // the call destination? |
goetz@6458 | 640 | bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) { |
goetz@6458 | 641 | // variant 2 is pc-relative |
goetz@6458 | 642 | return is_bxx64_patchable_variant2_at(instruction_addr, link); |
goetz@6458 | 643 | } |
goetz@6458 | 644 | |
goetz@6458 | 645 | // Identify variant 1. |
goetz@6458 | 646 | bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) { |
goetz@6458 | 647 | unsigned int* instr = (unsigned int*) instruction_addr; |
goetz@6458 | 648 | return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l] |
goetz@6458 | 649 | && is_mtctr(instr[5]) // mtctr |
goetz@6458 | 650 | && is_load_const_at(instruction_addr); |
goetz@6458 | 651 | } |
goetz@6458 | 652 | |
goetz@6458 | 653 | // Identify variant 1b: load destination relative to global toc. |
goetz@6458 | 654 | bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) { |
goetz@6458 | 655 | unsigned int* instr = (unsigned int*) instruction_addr; |
goetz@6458 | 656 | return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l] |
goetz@6458 | 657 | && is_mtctr(instr[3]) // mtctr |
goetz@6458 | 658 | && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr); |
goetz@6458 | 659 | } |
goetz@6458 | 660 | |
goetz@6458 | 661 | // Identify variant 2. |
goetz@6458 | 662 | bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) { |
goetz@6458 | 663 | unsigned int* instr = (unsigned int*) instruction_addr; |
goetz@6458 | 664 | if (link) { |
goetz@6458 | 665 | return is_bl (instr[6]) // bl dest is last |
goetz@6458 | 666 | && is_nop(instr[0]) // nop |
goetz@6458 | 667 | && is_nop(instr[1]) // nop |
goetz@6458 | 668 | && is_nop(instr[2]) // nop |
goetz@6458 | 669 | && is_nop(instr[3]) // nop |
goetz@6458 | 670 | && is_nop(instr[4]) // nop |
goetz@6458 | 671 | && is_nop(instr[5]); // nop |
goetz@6458 | 672 | } else { |
goetz@6458 | 673 | return is_b (instr[0]) // b dest is first |
goetz@6458 | 674 | && is_nop(instr[1]) // nop |
goetz@6458 | 675 | && is_nop(instr[2]) // nop |
goetz@6458 | 676 | && is_nop(instr[3]) // nop |
goetz@6458 | 677 | && is_nop(instr[4]) // nop |
goetz@6458 | 678 | && is_nop(instr[5]) // nop |
goetz@6458 | 679 | && is_nop(instr[6]); // nop |
goetz@6458 | 680 | } |
goetz@6458 | 681 | } |
goetz@6458 | 682 | |
goetz@6458 | 683 | // Set dest address of a bxx64_patchable instruction. |
goetz@6458 | 684 | void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) { |
goetz@6458 | 685 | ResourceMark rm; |
goetz@6458 | 686 | int code_size = MacroAssembler::bxx64_patchable_size; |
goetz@6458 | 687 | CodeBuffer buf(instruction_addr, code_size); |
goetz@6458 | 688 | MacroAssembler masm(&buf); |
goetz@6458 | 689 | masm.bxx64_patchable(dest, relocInfo::none, link); |
goetz@6495 | 690 | ICache::ppc64_flush_icache_bytes(instruction_addr, code_size); |
goetz@6458 | 691 | } |
goetz@6458 | 692 | |
goetz@6458 | 693 | // Get dest address of a bxx64_patchable instruction. |
goetz@6458 | 694 | address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) { |
goetz@6458 | 695 | if (is_bxx64_patchable_variant1_at(instruction_addr, link)) { |
goetz@6458 | 696 | return (address) (unsigned long) get_const(instruction_addr); |
goetz@6458 | 697 | } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) { |
goetz@6458 | 698 | unsigned int* instr = (unsigned int*) instruction_addr; |
goetz@6458 | 699 | if (link) { |
goetz@6458 | 700 | const int instr_idx = 6; // bl is last |
goetz@6458 | 701 | int branchoffset = branch_destination(instr[instr_idx], 0); |
goetz@6458 | 702 | return instruction_addr + branchoffset + instr_idx*BytesPerInstWord; |
goetz@6458 | 703 | } else { |
goetz@6458 | 704 | const int instr_idx = 0; // b is first |
goetz@6458 | 705 | int branchoffset = branch_destination(instr[instr_idx], 0); |
goetz@6458 | 706 | return instruction_addr + branchoffset + instr_idx*BytesPerInstWord; |
goetz@6458 | 707 | } |
goetz@6458 | 708 | // Load dest relative to global toc. |
goetz@6458 | 709 | } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) { |
goetz@6458 | 710 | return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, |
goetz@6458 | 711 | instruction_addr); |
goetz@6458 | 712 | } else { |
goetz@6458 | 713 | ShouldNotReachHere(); |
goetz@6458 | 714 | return NULL; |
goetz@6458 | 715 | } |
goetz@6458 | 716 | } |
goetz@6458 | 717 | |
goetz@6458 | 718 | // Uses ordering which corresponds to ABI: |
goetz@6458 | 719 | // _savegpr0_14: std r14,-144(r1) |
goetz@6458 | 720 | // _savegpr0_15: std r15,-136(r1) |
goetz@6458 | 721 | // _savegpr0_16: std r16,-128(r1) |
goetz@6458 | 722 | void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) { |
goetz@6458 | 723 | std(R14, offset, dst); offset += 8; |
goetz@6458 | 724 | std(R15, offset, dst); offset += 8; |
goetz@6458 | 725 | std(R16, offset, dst); offset += 8; |
goetz@6458 | 726 | std(R17, offset, dst); offset += 8; |
goetz@6458 | 727 | std(R18, offset, dst); offset += 8; |
goetz@6458 | 728 | std(R19, offset, dst); offset += 8; |
goetz@6458 | 729 | std(R20, offset, dst); offset += 8; |
goetz@6458 | 730 | std(R21, offset, dst); offset += 8; |
goetz@6458 | 731 | std(R22, offset, dst); offset += 8; |
goetz@6458 | 732 | std(R23, offset, dst); offset += 8; |
goetz@6458 | 733 | std(R24, offset, dst); offset += 8; |
goetz@6458 | 734 | std(R25, offset, dst); offset += 8; |
goetz@6458 | 735 | std(R26, offset, dst); offset += 8; |
goetz@6458 | 736 | std(R27, offset, dst); offset += 8; |
goetz@6458 | 737 | std(R28, offset, dst); offset += 8; |
goetz@6458 | 738 | std(R29, offset, dst); offset += 8; |
goetz@6458 | 739 | std(R30, offset, dst); offset += 8; |
goetz@6458 | 740 | std(R31, offset, dst); offset += 8; |
goetz@6458 | 741 | |
goetz@6458 | 742 | stfd(F14, offset, dst); offset += 8; |
goetz@6458 | 743 | stfd(F15, offset, dst); offset += 8; |
goetz@6458 | 744 | stfd(F16, offset, dst); offset += 8; |
goetz@6458 | 745 | stfd(F17, offset, dst); offset += 8; |
goetz@6458 | 746 | stfd(F18, offset, dst); offset += 8; |
goetz@6458 | 747 | stfd(F19, offset, dst); offset += 8; |
goetz@6458 | 748 | stfd(F20, offset, dst); offset += 8; |
goetz@6458 | 749 | stfd(F21, offset, dst); offset += 8; |
goetz@6458 | 750 | stfd(F22, offset, dst); offset += 8; |
goetz@6458 | 751 | stfd(F23, offset, dst); offset += 8; |
goetz@6458 | 752 | stfd(F24, offset, dst); offset += 8; |
goetz@6458 | 753 | stfd(F25, offset, dst); offset += 8; |
goetz@6458 | 754 | stfd(F26, offset, dst); offset += 8; |
goetz@6458 | 755 | stfd(F27, offset, dst); offset += 8; |
goetz@6458 | 756 | stfd(F28, offset, dst); offset += 8; |
goetz@6458 | 757 | stfd(F29, offset, dst); offset += 8; |
goetz@6458 | 758 | stfd(F30, offset, dst); offset += 8; |
goetz@6458 | 759 | stfd(F31, offset, dst); |
goetz@6458 | 760 | } |
goetz@6458 | 761 | |
goetz@6458 | 762 | // Uses ordering which corresponds to ABI: |
goetz@6458 | 763 | // _restgpr0_14: ld r14,-144(r1) |
goetz@6458 | 764 | // _restgpr0_15: ld r15,-136(r1) |
goetz@6458 | 765 | // _restgpr0_16: ld r16,-128(r1) |
goetz@6458 | 766 | void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) { |
goetz@6458 | 767 | ld(R14, offset, src); offset += 8; |
goetz@6458 | 768 | ld(R15, offset, src); offset += 8; |
goetz@6458 | 769 | ld(R16, offset, src); offset += 8; |
goetz@6458 | 770 | ld(R17, offset, src); offset += 8; |
goetz@6458 | 771 | ld(R18, offset, src); offset += 8; |
goetz@6458 | 772 | ld(R19, offset, src); offset += 8; |
goetz@6458 | 773 | ld(R20, offset, src); offset += 8; |
goetz@6458 | 774 | ld(R21, offset, src); offset += 8; |
goetz@6458 | 775 | ld(R22, offset, src); offset += 8; |
goetz@6458 | 776 | ld(R23, offset, src); offset += 8; |
goetz@6458 | 777 | ld(R24, offset, src); offset += 8; |
goetz@6458 | 778 | ld(R25, offset, src); offset += 8; |
goetz@6458 | 779 | ld(R26, offset, src); offset += 8; |
goetz@6458 | 780 | ld(R27, offset, src); offset += 8; |
goetz@6458 | 781 | ld(R28, offset, src); offset += 8; |
goetz@6458 | 782 | ld(R29, offset, src); offset += 8; |
goetz@6458 | 783 | ld(R30, offset, src); offset += 8; |
goetz@6458 | 784 | ld(R31, offset, src); offset += 8; |
goetz@6458 | 785 | |
goetz@6458 | 786 | // FP registers |
goetz@6458 | 787 | lfd(F14, offset, src); offset += 8; |
goetz@6458 | 788 | lfd(F15, offset, src); offset += 8; |
goetz@6458 | 789 | lfd(F16, offset, src); offset += 8; |
goetz@6458 | 790 | lfd(F17, offset, src); offset += 8; |
goetz@6458 | 791 | lfd(F18, offset, src); offset += 8; |
goetz@6458 | 792 | lfd(F19, offset, src); offset += 8; |
goetz@6458 | 793 | lfd(F20, offset, src); offset += 8; |
goetz@6458 | 794 | lfd(F21, offset, src); offset += 8; |
goetz@6458 | 795 | lfd(F22, offset, src); offset += 8; |
goetz@6458 | 796 | lfd(F23, offset, src); offset += 8; |
goetz@6458 | 797 | lfd(F24, offset, src); offset += 8; |
goetz@6458 | 798 | lfd(F25, offset, src); offset += 8; |
goetz@6458 | 799 | lfd(F26, offset, src); offset += 8; |
goetz@6458 | 800 | lfd(F27, offset, src); offset += 8; |
goetz@6458 | 801 | lfd(F28, offset, src); offset += 8; |
goetz@6458 | 802 | lfd(F29, offset, src); offset += 8; |
goetz@6458 | 803 | lfd(F30, offset, src); offset += 8; |
goetz@6458 | 804 | lfd(F31, offset, src); |
goetz@6458 | 805 | } |
goetz@6458 | 806 | |
goetz@6458 | 807 | // For verify_oops. |
goetz@6458 | 808 | void MacroAssembler::save_volatile_gprs(Register dst, int offset) { |
goetz@6458 | 809 | std(R3, offset, dst); offset += 8; |
goetz@6458 | 810 | std(R4, offset, dst); offset += 8; |
goetz@6458 | 811 | std(R5, offset, dst); offset += 8; |
goetz@6458 | 812 | std(R6, offset, dst); offset += 8; |
goetz@6458 | 813 | std(R7, offset, dst); offset += 8; |
goetz@6458 | 814 | std(R8, offset, dst); offset += 8; |
goetz@6458 | 815 | std(R9, offset, dst); offset += 8; |
goetz@6458 | 816 | std(R10, offset, dst); offset += 8; |
goetz@6458 | 817 | std(R11, offset, dst); offset += 8; |
goetz@6458 | 818 | std(R12, offset, dst); |
goetz@6458 | 819 | } |
goetz@6458 | 820 | |
goetz@6458 | 821 | // For verify_oops. |
goetz@6458 | 822 | void MacroAssembler::restore_volatile_gprs(Register src, int offset) { |
goetz@6458 | 823 | ld(R3, offset, src); offset += 8; |
goetz@6458 | 824 | ld(R4, offset, src); offset += 8; |
goetz@6458 | 825 | ld(R5, offset, src); offset += 8; |
goetz@6458 | 826 | ld(R6, offset, src); offset += 8; |
goetz@6458 | 827 | ld(R7, offset, src); offset += 8; |
goetz@6458 | 828 | ld(R8, offset, src); offset += 8; |
goetz@6458 | 829 | ld(R9, offset, src); offset += 8; |
goetz@6458 | 830 | ld(R10, offset, src); offset += 8; |
goetz@6458 | 831 | ld(R11, offset, src); offset += 8; |
goetz@6458 | 832 | ld(R12, offset, src); |
goetz@6458 | 833 | } |
goetz@6458 | 834 | |
goetz@6458 | 835 | void MacroAssembler::save_LR_CR(Register tmp) { |
goetz@6458 | 836 | mfcr(tmp); |
goetz@6458 | 837 | std(tmp, _abi(cr), R1_SP); |
goetz@6458 | 838 | mflr(tmp); |
goetz@6458 | 839 | std(tmp, _abi(lr), R1_SP); |
goetz@6458 | 840 | // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad) |
goetz@6458 | 841 | } |
goetz@6458 | 842 | |
goetz@6458 | 843 | void MacroAssembler::restore_LR_CR(Register tmp) { |
goetz@6458 | 844 | assert(tmp != R1_SP, "must be distinct"); |
goetz@6458 | 845 | ld(tmp, _abi(lr), R1_SP); |
goetz@6458 | 846 | mtlr(tmp); |
goetz@6458 | 847 | ld(tmp, _abi(cr), R1_SP); |
goetz@6458 | 848 | mtcr(tmp); |
goetz@6458 | 849 | } |
goetz@6458 | 850 | |
goetz@6458 | 851 | address MacroAssembler::get_PC_trash_LR(Register result) { |
goetz@6458 | 852 | Label L; |
goetz@6458 | 853 | bl(L); |
goetz@6458 | 854 | bind(L); |
goetz@6458 | 855 | address lr_pc = pc(); |
goetz@6458 | 856 | mflr(result); |
goetz@6458 | 857 | return lr_pc; |
goetz@6458 | 858 | } |
goetz@6458 | 859 | |
goetz@6458 | 860 | void MacroAssembler::resize_frame(Register offset, Register tmp) { |
goetz@6458 | 861 | #ifdef ASSERT |
goetz@6458 | 862 | assert_different_registers(offset, tmp, R1_SP); |
goetz@6458 | 863 | andi_(tmp, offset, frame::alignment_in_bytes-1); |
goetz@6458 | 864 | asm_assert_eq("resize_frame: unaligned", 0x204); |
goetz@6458 | 865 | #endif |
goetz@6458 | 866 | |
goetz@6458 | 867 | // tmp <- *(SP) |
goetz@6458 | 868 | ld(tmp, _abi(callers_sp), R1_SP); |
goetz@6458 | 869 | // addr <- SP + offset; |
goetz@6458 | 870 | // *(addr) <- tmp; |
goetz@6458 | 871 | // SP <- addr |
goetz@6458 | 872 | stdux(tmp, R1_SP, offset); |
goetz@6458 | 873 | } |
goetz@6458 | 874 | |
goetz@6458 | 875 | void MacroAssembler::resize_frame(int offset, Register tmp) { |
goetz@6458 | 876 | assert(is_simm(offset, 16), "too big an offset"); |
goetz@6458 | 877 | assert_different_registers(tmp, R1_SP); |
goetz@6458 | 878 | assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned"); |
goetz@6458 | 879 | // tmp <- *(SP) |
goetz@6458 | 880 | ld(tmp, _abi(callers_sp), R1_SP); |
goetz@6458 | 881 | // addr <- SP + offset; |
goetz@6458 | 882 | // *(addr) <- tmp; |
goetz@6458 | 883 | // SP <- addr |
goetz@6458 | 884 | stdu(tmp, offset, R1_SP); |
goetz@6458 | 885 | } |
goetz@6458 | 886 | |
goetz@6458 | 887 | void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) { |
goetz@6458 | 888 | // (addr == tmp1) || (addr == tmp2) is allowed here! |
goetz@6458 | 889 | assert(tmp1 != tmp2, "must be distinct"); |
goetz@6458 | 890 | |
goetz@6458 | 891 | // compute offset w.r.t. current stack pointer |
goetz@6458 | 892 | // tmp_1 <- addr - SP (!) |
goetz@6458 | 893 | subf(tmp1, R1_SP, addr); |
goetz@6458 | 894 | |
goetz@6458 | 895 | // atomically update SP keeping back link. |
goetz@6458 | 896 | resize_frame(tmp1/* offset */, tmp2/* tmp */); |
goetz@6458 | 897 | } |
goetz@6458 | 898 | |
goetz@6458 | 899 | void MacroAssembler::push_frame(Register bytes, Register tmp) { |
goetz@6458 | 900 | #ifdef ASSERT |
goetz@6458 | 901 | assert(bytes != R0, "r0 not allowed here"); |
goetz@6458 | 902 | andi_(R0, bytes, frame::alignment_in_bytes-1); |
goetz@6458 | 903 | asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203); |
goetz@6458 | 904 | #endif |
goetz@6458 | 905 | neg(tmp, bytes); |
goetz@6458 | 906 | stdux(R1_SP, R1_SP, tmp); |
goetz@6458 | 907 | } |
goetz@6458 | 908 | |
goetz@6458 | 909 | // Push a frame of size `bytes'. |
goetz@6458 | 910 | void MacroAssembler::push_frame(unsigned int bytes, Register tmp) { |
goetz@6458 | 911 | long offset = align_addr(bytes, frame::alignment_in_bytes); |
goetz@6458 | 912 | if (is_simm(-offset, 16)) { |
goetz@6458 | 913 | stdu(R1_SP, -offset, R1_SP); |
goetz@6458 | 914 | } else { |
goetz@6458 | 915 | load_const(tmp, -offset); |
goetz@6458 | 916 | stdux(R1_SP, R1_SP, tmp); |
goetz@6458 | 917 | } |
goetz@6458 | 918 | } |
goetz@6458 | 919 | |
goetz@6511 | 920 | // Push a frame of size `bytes' plus abi_reg_args on top. |
goetz@6511 | 921 | void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) { |
goetz@6511 | 922 | push_frame(bytes + frame::abi_reg_args_size, tmp); |
goetz@6458 | 923 | } |
goetz@6458 | 924 | |
goetz@6458 | 925 | // Setup up a new C frame with a spill area for non-volatile GPRs and |
goetz@6458 | 926 | // additional space for local variables. |
goetz@6511 | 927 | void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes, |
goetz@6511 | 928 | Register tmp) { |
goetz@6511 | 929 | push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp); |
goetz@6458 | 930 | } |
goetz@6458 | 931 | |
goetz@6458 | 932 | // Pop current C frame. |
goetz@6458 | 933 | void MacroAssembler::pop_frame() { |
goetz@6458 | 934 | ld(R1_SP, _abi(callers_sp), R1_SP); |
goetz@6458 | 935 | } |
goetz@6458 | 936 | |
goetz@6511 | 937 | #if defined(ABI_ELFv2) |
goetz@6511 | 938 | address MacroAssembler::branch_to(Register r_function_entry, bool and_link) { |
goetz@6511 | 939 | // TODO(asmundak): make sure the caller uses R12 as function descriptor |
goetz@6511 | 940 | // most of the times. |
goetz@6511 | 941 | if (R12 != r_function_entry) { |
goetz@6511 | 942 | mr(R12, r_function_entry); |
goetz@6511 | 943 | } |
goetz@6511 | 944 | mtctr(R12); |
goetz@6511 | 945 | // Do a call or a branch. |
goetz@6511 | 946 | if (and_link) { |
goetz@6511 | 947 | bctrl(); |
goetz@6511 | 948 | } else { |
goetz@6511 | 949 | bctr(); |
goetz@6511 | 950 | } |
goetz@6511 | 951 | _last_calls_return_pc = pc(); |
goetz@6511 | 952 | |
goetz@6511 | 953 | return _last_calls_return_pc; |
goetz@6511 | 954 | } |
goetz@6511 | 955 | |
goetz@6511 | 956 | // Call a C function via a function descriptor and use full C |
goetz@6511 | 957 | // calling conventions. Updates and returns _last_calls_return_pc. |
goetz@6511 | 958 | address MacroAssembler::call_c(Register r_function_entry) { |
goetz@6511 | 959 | return branch_to(r_function_entry, /*and_link=*/true); |
goetz@6511 | 960 | } |
goetz@6511 | 961 | |
goetz@6511 | 962 | // For tail calls: only branch, don't link, so callee returns to caller of this function. |
goetz@6511 | 963 | address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) { |
goetz@6511 | 964 | return branch_to(r_function_entry, /*and_link=*/false); |
goetz@6511 | 965 | } |
goetz@6511 | 966 | |
goetz@6511 | 967 | address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) { |
goetz@6511 | 968 | load_const(R12, function_entry, R0); |
goetz@6511 | 969 | return branch_to(R12, /*and_link=*/true); |
goetz@6511 | 970 | } |
goetz@6511 | 971 | |
goetz@6511 | 972 | #else |
goetz@6458 | 973 | // Generic version of a call to C function via a function descriptor |
goetz@6458 | 974 | // with variable support for C calling conventions (TOC, ENV, etc.). |
goetz@6458 | 975 | // Updates and returns _last_calls_return_pc. |
goetz@6458 | 976 | address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, |
goetz@6458 | 977 | bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) { |
goetz@6458 | 978 | // we emit standard ptrgl glue code here |
goetz@6458 | 979 | assert((function_descriptor != R0), "function_descriptor cannot be R0"); |
goetz@6458 | 980 | |
goetz@6458 | 981 | // retrieve necessary entries from the function descriptor |
goetz@6458 | 982 | ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor); |
goetz@6458 | 983 | mtctr(R0); |
goetz@6458 | 984 | |
goetz@6458 | 985 | if (load_toc_of_callee) { |
goetz@6458 | 986 | ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor); |
goetz@6458 | 987 | } |
goetz@6458 | 988 | if (load_env_of_callee) { |
goetz@6458 | 989 | ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor); |
goetz@6458 | 990 | } else if (load_toc_of_callee) { |
goetz@6458 | 991 | li(R11, 0); |
goetz@6458 | 992 | } |
goetz@6458 | 993 | |
goetz@6458 | 994 | // do a call or a branch |
goetz@6458 | 995 | if (and_link) { |
goetz@6458 | 996 | bctrl(); |
goetz@6458 | 997 | } else { |
goetz@6458 | 998 | bctr(); |
goetz@6458 | 999 | } |
goetz@6458 | 1000 | _last_calls_return_pc = pc(); |
goetz@6458 | 1001 | |
goetz@6458 | 1002 | return _last_calls_return_pc; |
goetz@6458 | 1003 | } |
goetz@6458 | 1004 | |
goetz@6458 | 1005 | // Call a C function via a function descriptor and use full C calling |
goetz@6458 | 1006 | // conventions. |
goetz@6458 | 1007 | // We don't use the TOC in generated code, so there is no need to save |
goetz@6458 | 1008 | // and restore its value. |
goetz@6458 | 1009 | address MacroAssembler::call_c(Register fd) { |
goetz@6458 | 1010 | return branch_to(fd, /*and_link=*/true, |
goetz@6458 | 1011 | /*save toc=*/false, |
goetz@6458 | 1012 | /*restore toc=*/false, |
goetz@6458 | 1013 | /*load toc=*/true, |
goetz@6458 | 1014 | /*load env=*/true); |
goetz@6458 | 1015 | } |
goetz@6458 | 1016 | |
goetz@6495 | 1017 | address MacroAssembler::call_c_and_return_to_caller(Register fd) { |
goetz@6495 | 1018 | return branch_to(fd, /*and_link=*/false, |
goetz@6495 | 1019 | /*save toc=*/false, |
goetz@6495 | 1020 | /*restore toc=*/false, |
goetz@6495 | 1021 | /*load toc=*/true, |
goetz@6495 | 1022 | /*load env=*/true); |
goetz@6495 | 1023 | } |
goetz@6495 | 1024 | |
goetz@6458 | 1025 | address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) { |
goetz@6458 | 1026 | if (rt != relocInfo::none) { |
goetz@6458 | 1027 | // this call needs to be relocatable |
goetz@6458 | 1028 | if (!ReoptimizeCallSequences |
goetz@6458 | 1029 | || (rt != relocInfo::runtime_call_type && rt != relocInfo::none) |
goetz@6458 | 1030 | || fd == NULL // support code-size estimation |
goetz@6458 | 1031 | || !fd->is_friend_function() |
goetz@6458 | 1032 | || fd->entry() == NULL) { |
goetz@6458 | 1033 | // it's not a friend function as defined by class FunctionDescriptor, |
goetz@6458 | 1034 | // so do a full call-c here. |
goetz@6458 | 1035 | load_const(R11, (address)fd, R0); |
goetz@6458 | 1036 | |
goetz@6458 | 1037 | bool has_env = (fd != NULL && fd->env() != NULL); |
goetz@6458 | 1038 | return branch_to(R11, /*and_link=*/true, |
goetz@6501 | 1039 | /*save toc=*/false, |
goetz@6501 | 1040 | /*restore toc=*/false, |
goetz@6501 | 1041 | /*load toc=*/true, |
goetz@6501 | 1042 | /*load env=*/has_env); |
goetz@6458 | 1043 | } else { |
goetz@6458 | 1044 | // It's a friend function. Load the entry point and don't care about |
goetz@6458 | 1045 | // toc and env. Use an optimizable call instruction, but ensure the |
goetz@6458 | 1046 | // same code-size as in the case of a non-friend function. |
goetz@6458 | 1047 | nop(); |
goetz@6458 | 1048 | nop(); |
goetz@6458 | 1049 | nop(); |
goetz@6458 | 1050 | bl64_patchable(fd->entry(), rt); |
goetz@6458 | 1051 | _last_calls_return_pc = pc(); |
goetz@6458 | 1052 | return _last_calls_return_pc; |
goetz@6458 | 1053 | } |
goetz@6458 | 1054 | } else { |
goetz@6458 | 1055 | // This call does not need to be relocatable, do more aggressive |
goetz@6458 | 1056 | // optimizations. |
goetz@6458 | 1057 | if (!ReoptimizeCallSequences |
goetz@6458 | 1058 | || !fd->is_friend_function()) { |
goetz@6458 | 1059 | // It's not a friend function as defined by class FunctionDescriptor, |
goetz@6458 | 1060 | // so do a full call-c here. |
goetz@6458 | 1061 | load_const(R11, (address)fd, R0); |
goetz@6458 | 1062 | return branch_to(R11, /*and_link=*/true, |
goetz@6501 | 1063 | /*save toc=*/false, |
goetz@6501 | 1064 | /*restore toc=*/false, |
goetz@6501 | 1065 | /*load toc=*/true, |
goetz@6501 | 1066 | /*load env=*/true); |
goetz@6458 | 1067 | } else { |
goetz@6458 | 1068 | // it's a friend function, load the entry point and don't care about |
goetz@6458 | 1069 | // toc and env. |
goetz@6458 | 1070 | address dest = fd->entry(); |
goetz@6458 | 1071 | if (is_within_range_of_b(dest, pc())) { |
goetz@6458 | 1072 | bl(dest); |
goetz@6458 | 1073 | } else { |
goetz@6458 | 1074 | bl64_patchable(dest, rt); |
goetz@6458 | 1075 | } |
goetz@6458 | 1076 | _last_calls_return_pc = pc(); |
goetz@6458 | 1077 | return _last_calls_return_pc; |
goetz@6458 | 1078 | } |
goetz@6458 | 1079 | } |
goetz@6458 | 1080 | } |
goetz@6458 | 1081 | |
goetz@6458 | 1082 | // Call a C function. All constants needed reside in TOC. |
goetz@6458 | 1083 | // |
goetz@6458 | 1084 | // Read the address to call from the TOC. |
goetz@6458 | 1085 | // Read env from TOC, if fd specifies an env. |
goetz@6458 | 1086 | // Read new TOC from TOC. |
goetz@6458 | 1087 | address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd, |
goetz@6458 | 1088 | relocInfo::relocType rt, Register toc) { |
goetz@6458 | 1089 | if (!ReoptimizeCallSequences |
goetz@6458 | 1090 | || (rt != relocInfo::runtime_call_type && rt != relocInfo::none) |
goetz@6458 | 1091 | || !fd->is_friend_function()) { |
goetz@6458 | 1092 | // It's not a friend function as defined by class FunctionDescriptor, |
goetz@6458 | 1093 | // so do a full call-c here. |
goetz@6458 | 1094 | assert(fd->entry() != NULL, "function must be linked"); |
goetz@6458 | 1095 | |
goetz@6458 | 1096 | AddressLiteral fd_entry(fd->entry()); |
goetz@6458 | 1097 | load_const_from_method_toc(R11, fd_entry, toc); |
goetz@6458 | 1098 | mtctr(R11); |
goetz@6458 | 1099 | if (fd->env() == NULL) { |
goetz@6458 | 1100 | li(R11, 0); |
goetz@6458 | 1101 | nop(); |
goetz@6458 | 1102 | } else { |
goetz@6458 | 1103 | AddressLiteral fd_env(fd->env()); |
goetz@6458 | 1104 | load_const_from_method_toc(R11, fd_env, toc); |
goetz@6458 | 1105 | } |
goetz@6458 | 1106 | AddressLiteral fd_toc(fd->toc()); |
goetz@6458 | 1107 | load_toc_from_toc(R2_TOC, fd_toc, toc); |
goetz@6458 | 1108 | // R2_TOC is killed. |
goetz@6458 | 1109 | bctrl(); |
goetz@6458 | 1110 | _last_calls_return_pc = pc(); |
goetz@6458 | 1111 | } else { |
goetz@6458 | 1112 | // It's a friend function, load the entry point and don't care about |
goetz@6458 | 1113 | // toc and env. Use an optimizable call instruction, but ensure the |
goetz@6458 | 1114 | // same code-size as in the case of a non-friend function. |
goetz@6458 | 1115 | nop(); |
goetz@6458 | 1116 | bl64_patchable(fd->entry(), rt); |
goetz@6458 | 1117 | _last_calls_return_pc = pc(); |
goetz@6458 | 1118 | } |
goetz@6458 | 1119 | return _last_calls_return_pc; |
goetz@6458 | 1120 | } |
goetz@6515 | 1121 | #endif // ABI_ELFv2 |
goetz@6458 | 1122 | |
goetz@6458 | 1123 | void MacroAssembler::call_VM_base(Register oop_result, |
goetz@6458 | 1124 | Register last_java_sp, |
goetz@6458 | 1125 | address entry_point, |
goetz@6458 | 1126 | bool check_exceptions) { |
goetz@6458 | 1127 | BLOCK_COMMENT("call_VM {"); |
goetz@6458 | 1128 | // Determine last_java_sp register. |
goetz@6458 | 1129 | if (!last_java_sp->is_valid()) { |
goetz@6458 | 1130 | last_java_sp = R1_SP; |
goetz@6458 | 1131 | } |
goetz@6458 | 1132 | set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1); |
goetz@6458 | 1133 | |
goetz@6458 | 1134 | // ARG1 must hold thread address. |
goetz@6458 | 1135 | mr(R3_ARG1, R16_thread); |
goetz@6511 | 1136 | #if defined(ABI_ELFv2) |
goetz@6511 | 1137 | address return_pc = call_c(entry_point, relocInfo::none); |
goetz@6511 | 1138 | #else |
goetz@6458 | 1139 | address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none); |
goetz@6511 | 1140 | #endif |
goetz@6458 | 1141 | |
goetz@6458 | 1142 | reset_last_Java_frame(); |
goetz@6458 | 1143 | |
goetz@6458 | 1144 | // Check for pending exceptions. |
goetz@6458 | 1145 | if (check_exceptions) { |
goetz@6458 | 1146 | // We don't check for exceptions here. |
goetz@6458 | 1147 | ShouldNotReachHere(); |
goetz@6458 | 1148 | } |
goetz@6458 | 1149 | |
goetz@6458 | 1150 | // Get oop result if there is one and reset the value in the thread. |
goetz@6458 | 1151 | if (oop_result->is_valid()) { |
goetz@6458 | 1152 | get_vm_result(oop_result); |
goetz@6458 | 1153 | } |
goetz@6458 | 1154 | |
goetz@6458 | 1155 | _last_calls_return_pc = return_pc; |
goetz@6458 | 1156 | BLOCK_COMMENT("} call_VM"); |
goetz@6458 | 1157 | } |
goetz@6458 | 1158 | |
goetz@6458 | 1159 | void MacroAssembler::call_VM_leaf_base(address entry_point) { |
goetz@6458 | 1160 | BLOCK_COMMENT("call_VM_leaf {"); |
goetz@6511 | 1161 | #if defined(ABI_ELFv2) |
goetz@6511 | 1162 | call_c(entry_point, relocInfo::none); |
goetz@6511 | 1163 | #else |
goetz@6458 | 1164 | call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none); |
goetz@6511 | 1165 | #endif |
goetz@6458 | 1166 | BLOCK_COMMENT("} call_VM_leaf"); |
goetz@6458 | 1167 | } |
goetz@6458 | 1168 | |
goetz@6458 | 1169 | void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { |
goetz@6458 | 1170 | call_VM_base(oop_result, noreg, entry_point, check_exceptions); |
goetz@6458 | 1171 | } |
goetz@6458 | 1172 | |
goetz@6458 | 1173 | void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, |
goetz@6458 | 1174 | bool check_exceptions) { |
goetz@6458 | 1175 | // R3_ARG1 is reserved for the thread. |
goetz@6458 | 1176 | mr_if_needed(R4_ARG2, arg_1); |
goetz@6458 | 1177 | call_VM(oop_result, entry_point, check_exceptions); |
goetz@6458 | 1178 | } |
goetz@6458 | 1179 | |
goetz@6458 | 1180 | void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, |
goetz@6458 | 1181 | bool check_exceptions) { |
goetz@6458 | 1182 | // R3_ARG1 is reserved for the thread |
goetz@6458 | 1183 | mr_if_needed(R4_ARG2, arg_1); |
goetz@6458 | 1184 | assert(arg_2 != R4_ARG2, "smashed argument"); |
goetz@6458 | 1185 | mr_if_needed(R5_ARG3, arg_2); |
goetz@6458 | 1186 | call_VM(oop_result, entry_point, check_exceptions); |
goetz@6458 | 1187 | } |
goetz@6458 | 1188 | |
goetz@6458 | 1189 | void MacroAssembler::call_VM_leaf(address entry_point) { |
goetz@6458 | 1190 | call_VM_leaf_base(entry_point); |
goetz@6458 | 1191 | } |
goetz@6458 | 1192 | |
goetz@6458 | 1193 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { |
goetz@6458 | 1194 | mr_if_needed(R3_ARG1, arg_1); |
goetz@6458 | 1195 | call_VM_leaf(entry_point); |
goetz@6458 | 1196 | } |
goetz@6458 | 1197 | |
goetz@6458 | 1198 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { |
goetz@6458 | 1199 | mr_if_needed(R3_ARG1, arg_1); |
goetz@6458 | 1200 | assert(arg_2 != R3_ARG1, "smashed argument"); |
goetz@6458 | 1201 | mr_if_needed(R4_ARG2, arg_2); |
goetz@6458 | 1202 | call_VM_leaf(entry_point); |
goetz@6458 | 1203 | } |
goetz@6458 | 1204 | |
goetz@6458 | 1205 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { |
goetz@6458 | 1206 | mr_if_needed(R3_ARG1, arg_1); |
goetz@6458 | 1207 | assert(arg_2 != R3_ARG1, "smashed argument"); |
goetz@6458 | 1208 | mr_if_needed(R4_ARG2, arg_2); |
goetz@6458 | 1209 | assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument"); |
goetz@6458 | 1210 | mr_if_needed(R5_ARG3, arg_3); |
goetz@6458 | 1211 | call_VM_leaf(entry_point); |
goetz@6458 | 1212 | } |
goetz@6458 | 1213 | |
// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
// If a ucontext is supplied (Linux only), additionally verifies via the
// saved register state that the accessed address really is the polling
// page; if polling_address_ptr is non-NULL it receives the banged address
// (or NULL when no ucontext is available).
bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
                                               address* polling_address_ptr) {
  if (!is_ld(instruction))
    return false; // It's not a ld. Fail.

  // Decode the ld fields: load_from_polling_page() emits ld(R0, 0, ra),
  // i.e. target register 0, zero displacement, non-zero base register.
  int rt = inv_rt_field(instruction);
  int ra = inv_ra_field(instruction);
  int ds = inv_ds_field(instruction);
  if (!(ds == 0 && ra != 0 && rt == 0)) {
    return false; // It's not a ld(r0, X, ra). Fail.
  }

  if (!ucontext) {
    // Set polling address.
    if (polling_address_ptr != NULL) {
      *polling_address_ptr = NULL;
    }
    return true; // No ucontext given. Can't check value of ra. Assume true.
  }

#ifdef LINUX
  // Ucontext given. Check that register ra contains the address of
  // the safepoint polling page.
  ucontext_t* uc = (ucontext_t*) ucontext;
  // Set polling address.
  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  if (polling_address_ptr != NULL) {
    *polling_address_ptr = addr;
  }
  return os::is_poll_address(addr);
#else
  // Not on Linux, ucontext must be NULL.
  ShouldNotReachHere();
  return false;
#endif
}
goetz@6458 | 1252 | |
// Check whether instruction is a write access to the thread's memory
// serialization page that was emitted by the memory-serialization
// workaround. Decodes stwx/stwux (register+register addressing) and
// stw/stwu (register+displacement addressing) and recomputes the store's
// effective address from the register values saved in the ucontext.
bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;

  if (is_stwx(instruction) || is_stwux(instruction)) {
    int ra = inv_ra_field(instruction);
    int rb = inv_rb_field(instruction);

    // look up content of ra and rb in ucontext
    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
    long rb_val=(long)uc->uc_mcontext.regs->gpr[rb];
    return os::is_memory_serialize_page(thread, ra_val+rb_val);
  } else if (is_stw(instruction) || is_stwu(instruction)) {
    int ra = inv_ra_field(instruction);
    int d1 = inv_d1_field(instruction);

    // look up content of ra in ucontext
    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
    return os::is_memory_serialize_page(thread, ra_val+d1);
  } else {
    return false;
  }
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return false;
#endif
}
goetz@6458 | 1281 | |
// Touch the stack page at SP - offset, either with a load or a store
// depending on UseLoadInstructionsForStackBangingPPC64, so that a stack
// overflow traps here rather than at an uncontrolled point later.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // When increasing the stack, the old stack pointer will be written
  // to the new top of stack according to the PPC64 abi.
  // Therefore, stack banging is not necessary when increasing
  // the stack by <= os::vm_page_size() bytes.
  // When increasing the stack by a larger amount, this method is
  // called repeatedly to bang the intermediate pages.

  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");

  long stdoffset = -offset;

  if (is_simm(stdoffset, 16)) {
    // Signed 16 bit offset, a simple std is ok.
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, (int)(signed short)stdoffset, R1_SP);
    } else {
      std(R0,(int)(signed short)stdoffset, R1_SP);
    }
  } else if (is_simm(stdoffset, 31)) {
    // Offset needs more than 16 bits: split it into a high part added
    // via addis into a temp register and a low 16-bit displacement.
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

    Register tmp = R11;
    addis(tmp, R1_SP, hi);
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, lo, tmp);
    } else {
      std(R0, lo, tmp);
    }
  } else {
    // Offsets beyond 31 bits are not expected here.
    ShouldNotReachHere();
  }
}
goetz@6458 | 1317 | |
// If instruction is a stack bang of the form
//  std  R0,    x(Ry),       (see bang_stack_with_offset())
//  stdu R1_SP, x(R1_SP),    (see push_frame(), resize_frame())
// or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame())
// return the banged address. Otherwise, return 0.
// The effective address is recomputed from the register values saved in
// the ucontext (Linux only).
address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;
  int rs = inv_rs_field(instruction);
  int ra = inv_ra_field(instruction);
  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
      || (is_stdu(instruction) && rs == 1)) {
    int ds = inv_ds_field(instruction);
    // return banged address
    return ds+(address)uc->uc_mcontext.regs->gpr[ra];
  } else if (is_stdux(instruction) && rs == 1) {
    int rb = inv_rb_field(instruction);
    address sp = (address)uc->uc_mcontext.regs->gpr[1];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    // Only a stdux that grows the stack (negative offset from SP) is a bang.
    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
                                  : sp + rb_val; // banged address
  }
  return NULL; // not a stack bang
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return NULL;
#endif
}
goetz@6458 | 1348 | |
goetz@6458 | 1349 | // CmpxchgX sets condition register to cmpX(current, compare). |
goetz@6458 | 1350 | void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value, |
goetz@6458 | 1351 | Register compare_value, Register exchange_value, |
goetz@6458 | 1352 | Register addr_base, int semantics, bool cmpxchgx_hint, |
goetz@6458 | 1353 | Register int_flag_success, bool contention_hint) { |
goetz@6458 | 1354 | Label retry; |
goetz@6458 | 1355 | Label failed; |
goetz@6458 | 1356 | Label done; |
goetz@6458 | 1357 | |
goetz@6458 | 1358 | // Save one branch if result is returned via register and |
goetz@6458 | 1359 | // result register is different from the other ones. |
goetz@6458 | 1360 | bool use_result_reg = (int_flag_success != noreg); |
goetz@6458 | 1361 | bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value && |
goetz@6458 | 1362 | int_flag_success != exchange_value && int_flag_success != addr_base); |
goetz@6458 | 1363 | |
goetz@6458 | 1364 | // release/fence semantics |
goetz@6458 | 1365 | if (semantics & MemBarRel) { |
goetz@6458 | 1366 | release(); |
goetz@6458 | 1367 | } |
goetz@6458 | 1368 | |
goetz@6458 | 1369 | if (use_result_reg && preset_result_reg) { |
goetz@6458 | 1370 | li(int_flag_success, 0); // preset (assume cas failed) |
goetz@6458 | 1371 | } |
goetz@6458 | 1372 | |
goetz@6458 | 1373 | // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM). |
goetz@6458 | 1374 | if (contention_hint) { // Don't try to reserve if cmp fails. |
goetz@6458 | 1375 | lwz(dest_current_value, 0, addr_base); |
goetz@6458 | 1376 | cmpw(flag, dest_current_value, compare_value); |
goetz@6458 | 1377 | bne(flag, failed); |
goetz@6458 | 1378 | } |
goetz@6458 | 1379 | |
goetz@6458 | 1380 | // atomic emulation loop |
goetz@6458 | 1381 | bind(retry); |
goetz@6458 | 1382 | |
goetz@6458 | 1383 | lwarx(dest_current_value, addr_base, cmpxchgx_hint); |
goetz@6458 | 1384 | cmpw(flag, dest_current_value, compare_value); |
goetz@6458 | 1385 | if (UseStaticBranchPredictionInCompareAndSwapPPC64) { |
goetz@6458 | 1386 | bne_predict_not_taken(flag, failed); |
goetz@6458 | 1387 | } else { |
goetz@6458 | 1388 | bne( flag, failed); |
goetz@6458 | 1389 | } |
goetz@6458 | 1390 | // branch to done => (flag == ne), (dest_current_value != compare_value) |
goetz@6458 | 1391 | // fall through => (flag == eq), (dest_current_value == compare_value) |
goetz@6458 | 1392 | |
goetz@6458 | 1393 | stwcx_(exchange_value, addr_base); |
goetz@6458 | 1394 | if (UseStaticBranchPredictionInCompareAndSwapPPC64) { |
goetz@6458 | 1395 | bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0. |
goetz@6458 | 1396 | } else { |
goetz@6458 | 1397 | bne( CCR0, retry); // StXcx_ sets CCR0. |
goetz@6458 | 1398 | } |
goetz@6458 | 1399 | // fall through => (flag == eq), (dest_current_value == compare_value), (swapped) |
goetz@6458 | 1400 | |
goetz@6458 | 1401 | // Result in register (must do this at the end because int_flag_success can be the |
goetz@6458 | 1402 | // same register as one above). |
goetz@6458 | 1403 | if (use_result_reg) { |
goetz@6458 | 1404 | li(int_flag_success, 1); |
goetz@6458 | 1405 | } |
goetz@6458 | 1406 | |
goetz@6458 | 1407 | if (semantics & MemBarFenceAfter) { |
goetz@6458 | 1408 | fence(); |
goetz@6458 | 1409 | } else if (semantics & MemBarAcq) { |
goetz@6458 | 1410 | isync(); |
goetz@6458 | 1411 | } |
goetz@6458 | 1412 | |
goetz@6458 | 1413 | if (use_result_reg && !preset_result_reg) { |
goetz@6458 | 1414 | b(done); |
goetz@6458 | 1415 | } |
goetz@6458 | 1416 | |
goetz@6458 | 1417 | bind(failed); |
goetz@6458 | 1418 | if (use_result_reg && !preset_result_reg) { |
goetz@6458 | 1419 | li(int_flag_success, 0); |
goetz@6458 | 1420 | } |
goetz@6458 | 1421 | |
goetz@6458 | 1422 | bind(done); |
goetz@6458 | 1423 | // (flag == ne) => (dest_current_value != compare_value), (!swapped) |
goetz@6458 | 1424 | // (flag == eq) => (dest_current_value == compare_value), ( swapped) |
goetz@6458 | 1425 | } |
goetz@6458 | 1426 | |
// Performs atomic compare exchange:
//   if (compare_value == *addr_base)
//     *addr_base = exchange_value
//     int_flag_success = 1;
//   else
//     int_flag_success = 0;
//
// ConditionRegister flag       = cmp(compare_value, *addr_base)
// Register dest_current_value  = *addr_base
// Register compare_value       Used to compare with value in memory
// Register exchange_value      Written to memory if compare_value == *addr_base
// Register addr_base           The memory location to compareXChange
// Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
//
// To avoid the costly compare exchange the value is tested beforehand.
// Several special cases exist to avoid that unnecessary information is generated.
//
void MacroAssembler::cmpxchgd(ConditionRegister flag,
                              Register dest_current_value, Register compare_value, Register exchange_value,
                              Register addr_base, int semantics, bool cmpxchgx_hint,
                              Register int_flag_success, Label* failed_ext, bool contention_hint) {
  Label retry;
  Label failed_int;
  // Callers may supply their own failure label; otherwise fail locally.
  Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
  Label done;

  // Save one branch if result is returned via register and result register is different from the other ones.
  bool use_result_reg    = (int_flag_success!=noreg);
  bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value &&
                            int_flag_success!=exchange_value && int_flag_success!=addr_base);
  assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");

  // release/fence semantics
  if (semantics & MemBarRel) {
    release();
  }

  if (use_result_reg && preset_result_reg) {
    li(int_flag_success, 0); // preset (assume cas failed)
  }

  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  if (contention_hint) { // Don't try to reserve if cmp fails.
    ld(dest_current_value, 0, addr_base);
    cmpd(flag, dest_current_value, compare_value);
    bne(flag, failed);
  }

  // atomic emulation loop
  bind(retry);

  // Load with reservation, compare, and conditionally store the new value.
  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  cmpd(flag, dest_current_value, compare_value);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(flag, failed);
  } else {
    bne(                  flag, failed);
  }

  stdcx_(exchange_value, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
  } else {
    bne(                  CCR0, retry); // stXcx_ sets CCR0
  }

  // result in register (must do this at the end because int_flag_success can be the same register as one above)
  if (use_result_reg) {
    li(int_flag_success, 1);
  }

  // POWER6 doesn't need isync in CAS.
  // Always emit isync to be on the safe side.
  if (semantics & MemBarFenceAfter) {
    fence();
  } else if (semantics & MemBarAcq) {
    isync();
  }

  if (use_result_reg && !preset_result_reg) {
    b(done);
  }

  bind(failed_int);
  if (use_result_reg && !preset_result_reg) {
    li(int_flag_success, 0);
  }

  bind(done);
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
}
goetz@6458 | 1519 | |
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
// Note: recv_klass and (if it is a register) itable_index are clobbered.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Register sethi_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable).
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int logMEsize   = exact_log2(itableMethodEntry::size() * wordSize);
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int log_vte_size= exact_log2(vtableEntry::size() * wordSize);

  lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
  // %%% We should store the aligned, prescaled offset in the klassoop.
  // Then the next several instructions would fold away.

  // scan_temp = recv_klass + vtable_base + vtable_length * vte_size
  //           = address of the first itableOffsetEntry.
  sldi(scan_temp, scan_temp, log_vte_size);
  addi(scan_temp, scan_temp, vtable_base);
  add(scan_temp, recv_klass, scan_temp);

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  if (itable_index.is_register()) {
    Register itable_offset = itable_index.as_register();
    sldi(itable_offset, itable_offset, logMEsize);
    if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
    add(recv_klass, itable_offset, recv_klass);
  } else {
    long itable_offset = (long)itable_index.as_constant();
    load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
    add(recv_klass, sethi_temp, recv_klass);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  // Peel the first iteration: the common case hits the first entry, so the
  // null check and the scan_temp bump are only emitted in the loop body.
  for (int peel = 1; peel >= 0; peel--) {
    // %%%% Could load both offset and interface in one ldx, if they were
    // in the opposite order. This would save a load.
    ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);

    // Check that this entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cmpd(CCR0, method_result, intf_klass);

    if (peel) {
      beq(CCR0, found_method);
    } else {
      bne(CCR0, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    cmpdi(CCR0, method_result, 0);
    beq(CCR0, L_no_such_interface);
    addi(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
  lwz(scan_temp, ito_offset, scan_temp);
  ldx(method_result, scan_temp, recv_klass);
}
goetz@6458 | 1603 | |
goetz@6458 | 1604 | // virtual method calling |
goetz@6458 | 1605 | void MacroAssembler::lookup_virtual_method(Register recv_klass, |
goetz@6458 | 1606 | RegisterOrConstant vtable_index, |
goetz@6458 | 1607 | Register method_result) { |
goetz@6458 | 1608 | |
goetz@6458 | 1609 | assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); |
goetz@6458 | 1610 | |
goetz@6458 | 1611 | const int base = InstanceKlass::vtable_start_offset() * wordSize; |
goetz@6458 | 1612 | assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
goetz@6458 | 1613 | |
goetz@6458 | 1614 | if (vtable_index.is_register()) { |
goetz@6458 | 1615 | sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord); |
goetz@6458 | 1616 | add(recv_klass, vtable_index.as_register(), recv_klass); |
goetz@6458 | 1617 | } else { |
goetz@6458 | 1618 | addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord); |
goetz@6458 | 1619 | } |
goetz@6458 | 1620 | ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass); |
goetz@6458 | 1621 | } |
goetz@6458 | 1622 | |
goetz@6458 | 1623 | /////////////////////////////////////////// subtype checking //////////////////////////////////////////// |
goetz@6458 | 1624 | |
// Fast path of the subtype check: self-comparison, then the supertype
// display / super cache. Branches to L_success on a definite hit, to
// L_failure on a definite miss, and falls through when only the slow
// path (secondary supers scan) can decide.
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp1_reg,
                                                   Register temp2_reg,
                                                   Label& L_success,
                                                   Label& L_failure) {

  const Register check_cache_offset = temp1_reg;
  const Register cached_super       = temp2_reg;

  assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);

  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpd(CCR0, sub_klass, super_klass);
  beq(CCR0, L_success);

  // Check the supertype display:
  lwz(check_cache_offset, sco_offset, super_klass);
  // The loaded value is the offset from KlassOopDesc.

  ldx(cached_super, check_cache_offset, sub_klass);
  cmpd(CCR0, cached_super, super_klass);
  beq(CCR0, L_success);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  cmpwi(CCR0, check_cache_offset, sc_offset);
  bne(CCR0, L_failure);
  // bind(slow_path); // fallthru
}
goetz@6458 | 1673 | |
// Slow path of the subtype check: linearly scan sub_klass's secondary
// supers array for super_klass. On a hit the result is written to the
// secondary super cache, result_reg (if given) is set to 0, and control
// goes to *L_success (if given) or falls through. On a miss result_reg
// (if given) is set non-zero and control falls through.
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp1_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Register result_reg) {
  const Register array_ptr = temp1_reg; // current value from cache array
  const Register temp      = temp2_reg;

  assert_different_registers(sub_klass, super_klass, array_ptr, temp);

  int source_offset = in_bytes(Klass::secondary_supers_offset());
  int target_offset = in_bytes(Klass::secondary_super_cache_offset());

  int length_offset = Array<Klass*>::length_offset_in_bytes();
  int base_offset   = Array<Klass*>::base_offset_in_bytes();

  Label hit, loop, failure, fallthru;

  ld(array_ptr, source_offset, sub_klass);

  //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
  lwz(temp, length_offset, array_ptr);
  cmpwi(CCR0, temp, 0);
  beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0

  mtctr(temp); // load ctr

  // Scan the array; CTR counts the remaining entries.
  bind(loop);
  // Oops in table are NO MORE compressed.
  ld(temp, base_offset, array_ptr);
  cmpd(CCR0, temp, super_klass);
  beq(CCR0, hit);
  addi(array_ptr, array_ptr, BytesPerWord);
  bdnz(loop);

  bind(failure);
  if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
  b(fallthru);

  bind(hit);
  std(super_klass, target_offset, sub_klass); // save result to cache
  if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
  if (L_success != NULL) b(*L_success);

  bind(fallthru);
}
goetz@6458 | 1721 | |
// Try fast path, then go to slow one if not successful
// Branches to L_success if sub_klass is a subtype of super_klass;
// falls through otherwise. Clobbers temp1_reg and temp2_reg.
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp1_reg,
                                         Register temp2_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
  bind(L_failure); // Fallthru if not successful.
}
goetz@6458 | 1733 | |
// Branch to wrong_method_type unless the MethodType of method handle
// mh_reg equals mtype_reg. Clobbers temp_reg.
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                              Register temp_reg,
                                              Label& wrong_method_type) {
  assert_different_registers(mtype_reg, mh_reg, temp_reg);
  // Compare method type against that of the receiver.
  load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
  cmpd(CCR0, temp_reg, mtype_reg);
  bne(CCR0, wrong_method_type);
}
goetz@6458 | 1743 | |
goetz@6458 | 1744 | RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, |
goetz@6458 | 1745 | Register temp_reg, |
goetz@6458 | 1746 | int extra_slot_offset) { |
goetz@6458 | 1747 | // cf. TemplateTable::prepare_invoke(), if (load_receiver). |
goetz@6458 | 1748 | int stackElementSize = Interpreter::stackElementSize; |
goetz@6458 | 1749 | int offset = extra_slot_offset * stackElementSize; |
goetz@6458 | 1750 | if (arg_slot.is_constant()) { |
goetz@6458 | 1751 | offset += arg_slot.as_constant() * stackElementSize; |
goetz@6458 | 1752 | return offset; |
goetz@6458 | 1753 | } else { |
goetz@6458 | 1754 | assert(temp_reg != noreg, "must specify"); |
goetz@6458 | 1755 | sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); |
goetz@6458 | 1756 | if (offset != 0) |
goetz@6458 | 1757 | addi(temp_reg, temp_reg, offset); |
goetz@6458 | 1758 | return temp_reg; |
goetz@6458 | 1759 | } |
goetz@6458 | 1760 | } |
goetz@6458 | 1761 | |
// Fast path of biased locking for monitorenter.
//
// Expects the mark word of obj_reg preloaded into mark_reg. Branches to
// 'done' when the bias is (or becomes) owned by the current thread. If a
// CAS that installs or transfers the bias fails, branches to *slow_case
// (or to 'done' when slow_case == NULL). When the object is not biasable
// at all, falls through at the internally bound cas_label, so the
// caller's CAS-based locking code must immediately follow the code
// emitted here.
//
// Clobbers temp_reg and temp2_reg; mark_reg is modified along the way
// (and reloaded from the object where required). cr_reg receives the
// comparison/CAS results.
void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
                                          Register mark_reg, Register temp_reg,
                                          Register temp2_reg, Label& done, Label* slow_case) {
  assert(UseBiasedLocking, "why call this otherwise?");

#ifdef ASSERT
  assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
#endif

  Label cas_label;

  // Branch to done if fast path fails and no slow_case provided.
  Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
         "biased locking makes assumptions about bit layout");

  if (PrintBiasedLockingStatistics) {
    // Bump BiasedLocking::total_entry_count (32-bit counter; the update
    // is not atomic, which is acceptable for statistics).
    load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
    lwz(temp2_reg, 0, temp_reg);
    addi(temp2_reg, temp2_reg, 1);
    stw(temp2_reg, 0, temp_reg);
  }

  // Not biasable at all? Then take the regular CAS-based path.
  andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  bne(cr_reg, cas_label);

  load_klass(temp_reg, obj_reg);

  // temp_reg := mark XOR (prototype_header | current thread), with the
  // age bits masked out. A zero result means the object is already
  // biased toward this thread in the current epoch.
  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  orr(temp_reg, R16_thread, temp_reg);
  xorr(temp_reg, mark_reg, temp_reg);
  andr(temp_reg, temp_reg, temp2_reg);
  cmpdi(cr_reg, temp_reg, 0);
  if (PrintBiasedLockingStatistics) {
    Label l;
    bne(cr_reg, l);
    // Bump BiasedLocking::biased_lock_entry_count. mark_reg serves as a
    // scratch register here and is reloaded from the object below.
    load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
    lwz(temp2_reg, 0, mark_reg);
    addi(temp2_reg, temp2_reg, 1);
    stw(temp2_reg, 0, mark_reg);
    // restore mark_reg
    ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
    bind(l);
  }
  beq(cr_reg, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpwi(cr_reg, temp2_reg, 0);
  bne(cr_reg, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.

  int shift_amount = 64 - markOopDesc::epoch_shift;
  // rotate epoch bits to right (little) end and set other bits to 0
  // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
  rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
  // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
  bne(CCR0, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
                            markOopDesc::age_mask_in_place |
                            markOopDesc::epoch_mask_in_place));
  orr(temp_reg, R16_thread, mark_reg);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
           /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
           /*where=*/obj_reg,
           MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock(),
           noreg, slow_case_int); // bail out if failed

  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (PrintBiasedLockingStatistics) {
    load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
    lwz(temp2_reg, 0, temp_reg);
    addi(temp2_reg, temp2_reg, 1);
    stw(temp2_reg, 0, temp_reg);
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
  orr(temp_reg, R16_thread, temp_reg);
  load_klass(temp2_reg, obj_reg);
  ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
  orr(temp_reg, temp_reg, temp2_reg);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
           /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
           /*where=*/obj_reg,
           MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock(),
           noreg, slow_case_int); // bail out if failed

  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (PrintBiasedLockingStatistics) {
    load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
    lwz(temp2_reg, 0, temp_reg);
    addi(temp2_reg, temp2_reg, 1);
    stw(temp2_reg, 0, temp_reg);
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  load_klass(temp_reg, obj_reg);
  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
  orr(temp_reg, temp_reg, temp2_reg);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
           /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
           /*where=*/obj_reg,
           MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock());
  // Deliberately no slow-case label here: a failing CAS just means
  // another thread revoked the bias first, which is fine.

  // reload markOop in mark_reg before continuing with lightweight locking
  ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);

  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (PrintBiasedLockingStatistics) {
    Label l;
    bne(cr_reg, l);
    load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
    lwz(temp2_reg, 0, temp_reg);
    addi(temp2_reg, temp2_reg, 1);
    stw(temp2_reg, 0, temp_reg);
    bind(l);
  }

  bind(cas_label);
}
goetz@6458 | 1961 | |
// Fast path of biased locking for monitorexit: releasing a biased lock
// requires no store at all.
//
// mark_addr must hold the address of the object's mark word. Branches
// to 'done' when the mark still carries the biased-lock pattern (the
// unlock is then a no-op); otherwise falls through so the caller can
// perform the regular unlock. Clobbers temp_reg; cr_reg receives the
// comparison result.
void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.

  ld(temp_reg, 0, mark_addr);
  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);

  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  beq(cr_reg, done);
}
goetz@6458 | 1976 | |
// "The box" is the space on the stack where we copy the object mark.
//
// Compiler fast-path monitorenter. On exit, 'flag' == EQ indicates
// success (lock acquired) and 'flag' == NE indicates failure (the
// caller must call into the runtime). Clobbers temp, displaced_header
// and current_header. The EmitSync debug flag selects degraded code:
// bit 0x01 forces all locking into the runtime, bit 0x02 omits the
// inflated-monitor fast path.
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
                                               Register temp, Register displaced_header, Register current_header) {
  assert_different_registers(oop, box, temp, displaced_header, current_header);
  assert(flag != CCR0, "bad condition register");
  Label cont;
  Label object_has_monitor;
  Label cas_failed;

  // Load markOop from object into displaced_header.
  ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);


  // Always do locking in runtime.
  if (EmitSync & 0x01) {
    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
    return;
  }

  if (UseBiasedLocking) {
    // Branches to 'cont' with flag == EQ if the bias was acquired.
    biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
  }

  // Handle existing monitor.
  if ((EmitSync & 0x02) == 0) {
    // The object has an existing monitor iff (mark & monitor_value) != 0.
    andi_(temp, displaced_header, markOopDesc::monitor_value);
    bne(CCR0, object_has_monitor);
  }

  // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
  ori(displaced_header, displaced_header, markOopDesc::unlocked_value);

  // Load Compare Value application register.

  // Initialize the box. (Must happen before we update the object mark!)
  std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);

  // Must fence, otherwise, preceding store(s) may float below cmpxchg.
  // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
  // CmpxchgX sets cr_reg to cmpX(current, displaced).
  membar(Assembler::StoreStore);
  cmpxchgd(/*flag=*/flag,
           /*current_value=*/current_header,
           /*compare_value=*/displaced_header,
           /*exchange_value=*/box,
           /*where=*/oop,
           MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock(),
           noreg,
           &cas_failed);
  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // If the compare-and-exchange succeeded, then we found an unlocked
  // object and we have now locked it.
  b(cont);

  bind(cas_failed);
  // We did not see an unlocked object so try the fast recursive case.

  // Check if the owner is self by comparing the value in the markOop of object
  // (current_header) with the stack pointer.
  sub(current_header, current_header, R1_SP);
  load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
                                        markOopDesc::lock_mask_in_place));

  // Recursive iff (mark - SP) & (~(page_size-1) | lock_mask) == 0,
  // i.e. the mark points into our own stack frame and has no lock bits.
  and_(R0/*==0?*/, current_header, temp);
  // If condition is true we are cont and hence we can store 0 as the
  // displaced header in the box, which indicates that it is a recursive lock.
  mcrf(flag,CCR0);
  std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);

  // Handle existing monitor.
  if ((EmitSync & 0x02) == 0) {
    b(cont);

    bind(object_has_monitor);
    // The object's monitor m is unlocked iff m->owner == NULL,
    // otherwise m->owner may contain a thread or a stack address.
    //
    // Try to CAS m->owner from NULL to current thread.
    // temp := &m->owner (mark has monitor_value set, so subtract it).
    addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
    li(displaced_header, 0);
    // CmpxchgX sets flag to cmpX(current, displaced).
    cmpxchgd(/*flag=*/flag,
             /*current_value=*/current_header,
             /*compare_value=*/displaced_header,
             /*exchange_value=*/R16_thread,
             /*where=*/temp,
             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
             MacroAssembler::cmpxchgx_hint_acquire_lock());

    // Store a non-null value into the box.
    std(box, BasicLock::displaced_header_offset_in_bytes(), box);

#   ifdef ASSERT
    bne(flag, cont);
    // We have acquired the monitor, check some invariants.
    addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
    // Invariant 1: _recursions should be 0.
    //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
    asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
                            "monitor->_recursions should be 0", -1);
    // Invariant 2: OwnerIsThread shouldn't be 0.
    //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
    //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
#   endif
  }

  bind(cont);
  // flag == EQ indicates success
  // flag == NE indicates failure
}
goetz@6458 | 2091 | |
// Compiler fast-path monitorexit — the inverse of
// compiler_fast_lock_object. On exit, 'flag' == EQ indicates success
// (lock released) and 'flag' == NE indicates failure (the caller must
// call into the runtime). Clobbers temp, displaced_header and
// current_header. EmitSync bits select degraded code as in
// compiler_fast_lock_object.
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
                                                 Register temp, Register displaced_header, Register current_header) {
  assert_different_registers(oop, box, temp, displaced_header, current_header);
  assert(flag != CCR0, "bad condition register");
  Label cont;
  Label object_has_monitor;

  // Always do locking in runtime.
  if (EmitSync & 0x01) {
    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
    return;
  }

  if (UseBiasedLocking) {
    // Branches to 'cont' if the lock is biased: nothing to do then.
    biased_locking_exit(flag, oop, current_header, cont);
  }

  // Find the lock address and load the displaced header from the stack.
  ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);

  // If the displaced header is 0, we have a recursive unlock.
  cmpdi(flag, displaced_header, 0);
  beq(flag, cont);

  // Handle existing monitor.
  if ((EmitSync & 0x02) == 0) {
    // The object has an existing monitor iff (mark & monitor_value) != 0.
    ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
    andi(temp, current_header, markOopDesc::monitor_value);
    cmpdi(flag, temp, 0);
    bne(flag, object_has_monitor);
  }


  // Check if it is still a light weight lock, this is true if we see
  // the stack address of the basicLock in the markOop of the object.
  // Cmpxchg sets flag to cmpd(current_header, box).
  cmpxchgd(/*flag=*/flag,
           /*current_value=*/current_header,
           /*compare_value=*/box,
           /*exchange_value=*/displaced_header,
           /*where=*/oop,
           MacroAssembler::MemBarRel,
           MacroAssembler::cmpxchgx_hint_release_lock(),
           noreg,
           &cont);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // Handle existing monitor.
  if ((EmitSync & 0x02) == 0) {
    b(cont);

    bind(object_has_monitor);
    addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
    ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
    ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
    xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
    orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
    cmpdi(flag, temp, 0);
    bne(flag, cont); // Not owner, or recursions != 0 => go to runtime.

    ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
    ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
    orr(temp, temp, displaced_header); // Will be 0 if both are 0.
    cmpdi(flag, temp, 0);
    bne(flag, cont); // Queued threads present => go to runtime.
    release();
    // temp is known to be 0 here (checked above): clear the owner field.
    std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
  }

  bind(cont);
  // flag == EQ indicates success
  // flag == NE indicates failure
}
goetz@6458 | 2167 | |
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
//
// 'thread' is the current thread pointer; tmp1 and tmp2 are clobbered.
// The store writes whatever R0 currently contains — presumably only the
// store itself matters for the serialization protocol, not the value
// (NOTE(review): confirm against the other platform implementations).
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  // Derive a thread-specific offset from the thread pointer.
  srdi(tmp2, thread, os::get_serialize_page_shift_count());

  // Keep the offset inside the page, aligned to sizeof(int).
  int mask = os::vm_page_size() - sizeof(int);
  if (Assembler::is_simm(mask, 16)) {
    andi(tmp2, tmp2, mask);
  } else {
    // Mask does not fit into a 16-bit immediate: materialize it in tmp1.
    lis(tmp1, (int)((signed short) (mask >> 16)));
    ori(tmp1, tmp1, mask & 0x0000ffff);
    andr(tmp2, tmp2, tmp1);
  }

  load_const(tmp1, (long) os::get_memory_serialize_page());
  release(); // Order preceding accesses before the serializing store.
  stwx(R0, tmp1, tmp2);
}
goetz@6458 | 2188 | |
goetz@6458 | 2189 | |
goetz@6458 | 2190 | // GC barrier helper macros |
goetz@6458 | 2191 | |
// Write the card table byte if needed.
// Post-barrier for an oop store with a (non-G1) card-table barrier set:
// dirties the card covering Rstore_addr. Rtmp is clobbered, and
// Rstore_addr is destroyed as well (card_table_write shifts it in
// place). Callers must have filtered out null stores already.
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
#ifdef ASSERT
  cmpdi(CCR0, Rnew_val, 0);
  asm_assert_ne("null oop not allowed", 0x321); // Null stores need no barrier.
#endif
  card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
}
goetz@6458 | 2203 | |
// Write the card table byte.
// Marks the card covering the address in Robj as dirty (0).
// NOTE: Robj is destroyed — it is shifted in place to the card index.
// Rtmp and R0 are clobbered as well.
void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
  assert_different_registers(Robj, Rtmp, R0);
  load_const_optimized(Rtmp, (address)byte_map_base, R0);
  srdi(Robj, Robj, CardTableModRefBS::card_shift); // Address -> card index.
  li(R0, 0); // dirty
  // StoreStore only for CMS — presumably to order the preceding oop
  // store before the card mark; confirm against shared barrier code.
  if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
  stbx(R0, Rtmp, Robj);
}
goetz@6458 | 2213 | |
goetz@6515 | 2214 | #if INCLUDE_ALL_GCS |
// General G1 pre-barrier generator.
// Goal: record the previous value if it is not null.
//
// If Robj != noreg, the previous value is loaded from (offset, Robj);
// otherwise Rpre_val must already contain it. Non-null previous values
// are pushed onto the thread-local SATB mark queue, or passed to the
// g1_wb_pre runtime call when the queue buffer is full. Does nothing
// when concurrent marking is inactive. Rtmp1 and Rtmp2 are clobbered;
// Rpre_val may be clobbered by decoding or by the runtime call.
// If needs_frame, a frame is pushed/popped around the runtime call,
// and R31 keeps a preloaded volatile Rpre_val alive across it.
void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                                          Register Rtmp1, Register Rtmp2, bool needs_frame) {
  Label runtime, filtered;

  // Is marking active? The 'active' field width differs by platform,
  // so pick the matching load.
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  }
  cmpdi(CCR0, Rtmp1, 0);
  beq(CCR0, filtered); // Marking inactive: barrier is a no-op.

  // Do we need to load the previous value?
  if (Robj != noreg) {
    // Load the previous value...
    if (UseCompressedOops) {
      lwz(Rpre_val, offset, Robj);
    } else {
      ld(Rpre_val, offset, Robj);
    }
    // Previous value has been loaded into Rpre_val.
  }
  assert(Rpre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmpdi(CCR0, Rpre_val, 0);
  beq(CCR0, filtered);

  if (Robj != noreg && UseCompressedOops) {
    // Queue entries are uncompressed oops; decode before enqueueing.
    decode_heap_oop_not_null(Rpre_val);
  }

  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg. In the first case, do a normal call. In the
  // latter, do a save here and call the frameless version.

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  const Register Rbuffer = Rtmp1, Rindex = Rtmp2;

  ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  cmpdi(CCR0, Rindex, 0);
  beq(CCR0, runtime); // If index == 0, goto runtime.
  ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);

  addi(Rindex, Rindex, -wordSize); // Decrement index.
  std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);

  // Record the previous value.
  stdx(Rpre_val, Rbuffer, Rindex);
  b(filtered);

  bind(runtime);

  // VM call need frame to access(write) O register.
  if (needs_frame) {
    save_LR_CR(Rtmp1);
    push_frame_reg_args(0, Rtmp2);
  }

  if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
  if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore

  if (needs_frame) {
    pop_frame();
    restore_LR_CR(Rtmp1);
  }

  bind(filtered);
}
goetz@6458 | 2292 | |
goetz@6458 | 2293 | // General G1 post-barrier generator |
goetz@6458 | 2294 | // Store cross-region card. |
// G1 post-barrier.
// Filters out stores that need no remembered-set update (same-region stores,
// stores to young cards, already-dirty cards). Otherwise dirties the card
// covering Rstore_addr and enqueues the card address on the thread-local
// dirty card queue; calls into the runtime (g1_wb_post) when the queue is full.
// Kills Rtmp1, Rtmp2, Rtmp3. If filtered_ext is non-NULL, filtered stores
// continue at *filtered_ext instead of falling through.
void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
  Label runtime, filtered_int;
  Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);

  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::G1SATBCT ||
         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");

  // Does store cross heap regions?
  if (G1RSBarrierRegionFilter) {
    // Addresses in the same region share all bits above LogOfHRGrainBytes,
    // so the shifted xor is zero exactly for same-region stores.
    xorr(Rtmp1, Rstore_addr, Rnew_val);
    srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
    beq(CCR0, filtered);
  }

  // Crosses regions, storing NULL?
#ifdef ASSERT
  cmpdi(CCR0, Rnew_val, 0);
  asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
  //beq(CCR0, filtered);
#endif

  // Storing region crossing non-NULL, is card already dirty?
  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
  const Register Rcard_addr = Rtmp1;
  Register Rbase = Rtmp2;
  load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);

  srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);

  // Get the address of the card.
  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
  // Young cards never need post-barrier processing.
  cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  beq(CCR0, filtered);

  membar(Assembler::StoreLoad);
  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
  cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
  beq(CCR0, filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  li(Rtmp3, CardTableModRefBS::dirty_card_val());
  //release(); // G1: oops are allowed to get visible after dirty marking.
  stbx(Rtmp3, Rbase, Rcard_addr);

  add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  Rbase = noreg; // end of lifetime

  const Register Rqueue_index = Rtmp2,
                 Rqueue_buf   = Rtmp3;
  ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  cmpdi(CCR0, Rqueue_index, 0);
  beq(CCR0, runtime); // index == 0 then jump to runtime
  ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);

  addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
  std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);

  stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
  b(filtered);

  bind(runtime);

  // Save the live input values.
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);

  bind(filtered_int);
}
goetz@6515 | 2365 | #endif // INCLUDE_ALL_GCS |
goetz@6458 | 2366 | |
// Values for last_Java_pc, and last_Java_sp must comply to the rules
// in frame_ppc64.hpp.
// Publishes the frame anchor for the current thread. last_Java_pc == noreg
// means no pc is stored (only sp is published).
void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here so doesn't need to be set.)

  // Verify that last_Java_pc was zeroed on return to Java.
  asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
                          "last_Java_pc not zeroed before leaving Java", 0x200);

  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that
  // if we are doing a call to native (not VM) that we capture the
  // known pc and don't have to rely on the native call having a
  // standard frame linkage where we can find the pc.
  if (last_Java_pc != noreg)
    std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);

  // Set last_Java_sp last.
  std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
}
goetz@6458 | 2390 | |
// Clear the frame anchor: zeroing last_Java_sp marks the thread as having
// no last Java frame; last_Java_pc is zeroed as well (and is asserted to be
// zero on the next set_last_Java_frame). Kills R0.
void MacroAssembler::reset_last_Java_frame(void) {
  asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
                             R16_thread, "SP was not set, still zero", 0x202);

  BLOCK_COMMENT("reset_last_Java_frame {");
  li(R0, 0);

  // _last_Java_sp = 0
  std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);

  // _last_Java_pc = 0
  std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
  BLOCK_COMMENT("} reset_last_Java_frame");
}
goetz@6458 | 2405 | |
// Publish sp (which points to a TOP_IJAVA_FRAME) as the last Java frame.
// tmp1 is used to hold the pc that is stored into the anchor.
void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
  assert_different_registers(sp, tmp1);

  // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
  // TOP_IJAVA_FRAME_ABI.
  // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
#ifdef CC_INTERP
  ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
#else
  // Template interpreter: use the current code-emission address as the pc.
  address entry = pc();
  load_const_optimized(tmp1, entry);
#endif

  set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
}
goetz@6458 | 2421 | |
// Move the oop result of a VM call from the thread-local vm_result field
// into oop_result and clear the field. Kills R0.
void MacroAssembler::get_vm_result(Register oop_result) {
  // Read:
  //   R16_thread
  //   R16_thread->in_bytes(JavaThread::vm_result_offset())
  //
  // Updated:
  //   oop_result
  //   R16_thread->in_bytes(JavaThread::vm_result_offset())

  ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  li(R0, 0);
  std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);

  verify_oop(oop_result);
}
goetz@6458 | 2437 | |
// Move the metadata result of a VM call from the thread-local vm_result_2
// field into metadata_result and clear the field. Kills R0.
// No verify step here: the result is metadata, not an oop.
void MacroAssembler::get_vm_result_2(Register metadata_result) {
  // Read:
  //   R16_thread
  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
  //
  // Updated:
  //   metadata_result
  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())

  ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
  li(R0, 0);
  std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
}
goetz@6458 | 2451 | |
goetz@6458 | 2452 | |
goetz@6458 | 2453 | void MacroAssembler::encode_klass_not_null(Register dst, Register src) { |
goetz@6501 | 2454 | Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. |
goetz@6477 | 2455 | if (Universe::narrow_klass_base() != 0) { |
goetz@6515 | 2456 | // Use dst as temp if it is free. |
goetz@6515 | 2457 | load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg); |
goetz@6501 | 2458 | sub(dst, current, R0); |
goetz@6501 | 2459 | current = dst; |
goetz@6477 | 2460 | } |
goetz@6501 | 2461 | if (Universe::narrow_klass_shift() != 0) { |
goetz@6501 | 2462 | srdi(dst, current, Universe::narrow_klass_shift()); |
goetz@6501 | 2463 | current = dst; |
goetz@6458 | 2464 | } |
goetz@6501 | 2465 | mr_if_needed(dst, current); // Move may be required. |
goetz@6458 | 2466 | } |
goetz@6458 | 2467 | |
goetz@6458 | 2468 | void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) { |
goetz@6474 | 2469 | if (UseCompressedClassPointers) { |
goetz@6458 | 2470 | encode_klass_not_null(ck, klass); |
goetz@6458 | 2471 | stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop); |
goetz@6458 | 2472 | } else { |
goetz@6458 | 2473 | std(klass, oopDesc::klass_offset_in_bytes(), dst_oop); |
goetz@6458 | 2474 | } |
goetz@6458 | 2475 | } |
goetz@6458 | 2476 | |
goetz@6512 | 2477 | void MacroAssembler::store_klass_gap(Register dst_oop, Register val) { |
goetz@6512 | 2478 | if (UseCompressedClassPointers) { |
goetz@6512 | 2479 | if (val == noreg) { |
goetz@6512 | 2480 | val = R0; |
goetz@6512 | 2481 | li(val, 0); |
goetz@6512 | 2482 | } |
goetz@6512 | 2483 | stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed |
goetz@6512 | 2484 | } |
goetz@6512 | 2485 | } |
goetz@6512 | 2486 | |
goetz@6477 | 2487 | int MacroAssembler::instr_size_for_decode_klass_not_null() { |
goetz@6477 | 2488 | if (!UseCompressedClassPointers) return 0; |
goetz@6477 | 2489 | int num_instrs = 1; // shift or move |
goetz@6477 | 2490 | if (Universe::narrow_klass_base() != 0) num_instrs = 7; // shift + load const + add |
goetz@6477 | 2491 | return num_instrs * BytesPerInstWord; |
goetz@6477 | 2492 | } |
goetz@6477 | 2493 | |
goetz@6458 | 2494 | void MacroAssembler::decode_klass_not_null(Register dst, Register src) { |
goetz@6458 | 2495 | if (src == noreg) src = dst; |
goetz@6463 | 2496 | Register shifted_src = src; |
goetz@6477 | 2497 | if (Universe::narrow_klass_shift() != 0 || |
goetz@6477 | 2498 | Universe::narrow_klass_base() == 0 && src != dst) { // Move required. |
goetz@6463 | 2499 | shifted_src = dst; |
goetz@6463 | 2500 | sldi(shifted_src, src, Universe::narrow_klass_shift()); |
goetz@6458 | 2501 | } |
goetz@6477 | 2502 | if (Universe::narrow_klass_base() != 0) { |
goetz@6477 | 2503 | load_const(R0, Universe::narrow_klass_base()); |
goetz@6477 | 2504 | add(dst, shifted_src, R0); |
goetz@6477 | 2505 | } |
goetz@6458 | 2506 | } |
goetz@6458 | 2507 | |
goetz@6458 | 2508 | void MacroAssembler::load_klass(Register dst, Register src) { |
goetz@6474 | 2509 | if (UseCompressedClassPointers) { |
goetz@6458 | 2510 | lwz(dst, oopDesc::klass_offset_in_bytes(), src); |
goetz@6458 | 2511 | // Attention: no null check here! |
goetz@6458 | 2512 | decode_klass_not_null(dst, dst); |
goetz@6458 | 2513 | } else { |
goetz@6458 | 2514 | ld(dst, oopDesc::klass_offset_in_bytes(), src); |
goetz@6458 | 2515 | } |
goetz@6458 | 2516 | } |
goetz@6458 | 2517 | |
goetz@6458 | 2518 | void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) { |
goetz@6486 | 2519 | if (!os::zero_page_read_protected()) { |
goetz@6458 | 2520 | if (TrapBasedNullChecks) { |
goetz@6458 | 2521 | trap_null_check(src); |
goetz@6458 | 2522 | } |
goetz@6458 | 2523 | } |
goetz@6458 | 2524 | load_klass(dst, src); |
goetz@6458 | 2525 | } |
goetz@6458 | 2526 | |
goetz@6458 | 2527 | void MacroAssembler::reinit_heapbase(Register d, Register tmp) { |
goetz@6463 | 2528 | if (Universe::heap() != NULL) { |
goetz@6463 | 2529 | if (Universe::narrow_oop_base() == NULL) { |
goetz@6463 | 2530 | Assembler::xorr(R30, R30, R30); |
goetz@6463 | 2531 | } else { |
goetz@6463 | 2532 | load_const(R30, Universe::narrow_ptrs_base(), tmp); |
goetz@6463 | 2533 | } |
goetz@6463 | 2534 | } else { |
goetz@6458 | 2535 | load_const(R30, Universe::narrow_ptrs_base_addr(), tmp); |
goetz@6458 | 2536 | ld(R30, 0, R30); |
goetz@6458 | 2537 | } |
goetz@6458 | 2538 | } |
goetz@6458 | 2539 | |
// Clear Array
// Kills both input registers. tmp == R0 is allowed.
// Clears cnt_dwords doublewords starting at base_ptr (assumed 8-byte
// aligned). Large arrays are cleared with the data cache block zero
// instruction (dcbz) after aligning base_ptr to a cache-line boundary;
// small arrays and the remainder use a plain store loop.
// The //<n>: comments count the instructions emitted so far.
void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
  // Procedure for large arrays (uses data cache block zero instruction).
  Label startloop, fast, fastloop, small_rest, restloop, done;
  const int cl_size         = VM_Version::get_cache_line_size(),
            cl_dwords       = cl_size>>3,                    // Doublewords per cache line.
            cl_dw_addr_bits = exact_log2(cl_dwords),
            dcbz_min        = 1;                             // Min count of dcbz executions, needs to be >0.

  //2:
  cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
  blt(CCR1, small_rest);                                      // Too small.
  rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits);           // Extract dword offset within first cache line.
  beq(CCR0, fast);                                            // Already 128byte aligned.

  subfic(tmp, tmp, cl_dwords);
  mtctr(tmp);                        // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
  subf(cnt_dwords, tmp, cnt_dwords); // rest.
  li(tmp, 0);
  //10:
  bind(startloop);                   // Clear at the beginning to reach 128byte boundary.
  std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
  addi(base_ptr, base_ptr, 8);
  bdnz(startloop);
  //13:
  bind(fast);                                  // Clear 128byte blocks.
  srdi(tmp, cnt_dwords, cl_dw_addr_bits);      // Loop count for 128byte loop (>0).
  andi(cnt_dwords, cnt_dwords, cl_dwords-1);   // Rest in dwords.
  mtctr(tmp);                                  // Load counter.
  //16:
  bind(fastloop);
  dcbz(base_ptr);                    // Clear 128byte aligned block.
  addi(base_ptr, base_ptr, cl_size);
  bdnz(fastloop);
  if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
  //20:
  bind(small_rest);
  cmpdi(CCR0, cnt_dwords, 0);        // size 0?
  beq(CCR0, done);                   // rest == 0
  li(tmp, 0);
  mtctr(cnt_dwords);                 // Load counter.
  //24:
  bind(restloop);                    // Clear rest.
  std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
  addi(base_ptr, base_ptr, 8);
  bdnz(restloop);
  //27:
  bind(done);
}
goetz@6495 | 2590 | |
goetz@6458 | 2591 | /////////////////////////////////////////// String intrinsics //////////////////////////////////////////// |
goetz@6458 | 2592 | |
// Search for a single jchar in an jchar[].
//
// Assumes that result differs from all other registers.
//
// Haystack, needle are the addresses of jchar-arrays.
// NeedleChar is needle[0] if it is known at compile time.
// Haycnt is the length of the haystack. We assume haycnt >=1.
//
// Preserves haystack, haycnt, kills all other registers.
//
// If needle == R0, we search for the constant needleChar.
// The //<n>: comments count the instructions emitted so far.
void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
                                      Register needle, jchar needleChar,
                                      Register tmp1, Register tmp2) {

  assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);

  Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
  Register needle0 = needle, // Contains needle[0].
           addr = tmp1,
           ch1 = tmp2,
           ch2 = R0;

  //2 (variable) or 3 (const):
  if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
  dcbtct(haystack, 0x00);                    // Indicate R/O access to haystack.

  srwi_(tmp2, haycnt, 1);                    // Shift right by exact_log2(UNROLL_FACTOR).
  mr(addr, haystack);
  beq(CCR0, L_FinalCheck);
  mtctr(tmp2);                               // Move to count register.
  //8:
  bind(L_InnerLoop);                         // Main work horse (2x unrolled search loop).
  lhz(ch1, 0, addr);                         // Load characters from haystack.
  lhz(ch2, 2, addr);
  (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
  (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
  beq(CCR0, L_Found1);                       // Did we find the needle?
  beq(CCR1, L_Found2);
  addi(addr, addr, 4);
  bdnz(L_InnerLoop);
  //16:
  bind(L_FinalCheck);
  andi_(R0, haycnt, 1);                      // Odd haycnt: one character not covered by the 2x loop.
  beq(CCR0, L_NotFound);
  lhz(ch1, 0, addr);                         // One position left at which we have to compare.
  (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
  beq(CCR1, L_Found3);
  //21:
  bind(L_NotFound);
  li(result, -1);                            // Not found.
  b(L_End);

  bind(L_Found2);
  addi(addr, addr, 2);                       // Match was at the second character of the pair.
  //24:
  bind(L_Found1);
  bind(L_Found3);                            // Return index ...
  subf(addr, haystack, addr);                // relative to haystack,
  srdi(result, addr, 1);                     // in characters.
  bind(L_End);
}
goetz@6458 | 2655 | |
goetz@6458 | 2656 | |
// Implementation of IndexOf for jchar arrays.
//
// The length of haystack and needle are not constant, i.e. passed in a register.
//
// Preserves registers haystack, needle.
// Kills registers haycnt, needlecnt.
// Assumes that result differs from all other registers.
// Haystack, needle are the addresses of jchar-arrays.
// Haycnt, needlecnt are the lengths of them, respectively.
//
// Needlecntval must be zero or 15-bit unsigned immediate and > 1.
// The //<n>: comments count the instructions emitted so far; nops are
// inserted below to keep the branch targets aligned across the variants.
void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
                                    Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
                                    Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
  Label L_TooShort, L_Found, L_NotFound, L_End;
  Register last_addr = haycnt, // Kill haycnt at the beginning.
           addr = tmp1,
           n_start = tmp2,
           ch1 = tmp3,
           ch2 = R0;

  // **************************************************************************************************
  // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
  // **************************************************************************************************

  //1 (variable) or 3 (const):
  dcbtct(needle, 0x00);    // Indicate R/O access to str1.
  dcbtct(haystack, 0x00);  // Indicate R/O access to str2.

  // Compute last haystack addr to use if no match gets found.
  if (needlecntval == 0) { // variable needlecnt
    //3:
    subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
    addi(addr, haystack, -2);          // Accesses use pre-increment.
    cmpwi(CCR6, needlecnt, 2);
    blt(CCR6, L_TooShort);             // Variable needlecnt: handle short needle separately.
    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
    addi(needlecnt, needlecnt, -2);    // Rest of needle.
  } else { // constant needlecnt
    guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
    assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
    //5:
    addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
    addi(addr, haystack, -2);          // Accesses use pre-increment.
    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
    li(needlecnt, needlecntval-2);     // Rest of needle.
  }

  // Main Loop (now we have at least 3 characters).
  //11:
  Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
  bind(L_OuterLoop); // Search for 1st 2 characters.
  Register addr_diff = tmp4;
  subf(addr_diff, addr, last_addr);  // Difference between already checked address and last address to check.
  addi(addr, addr, 2);               // This is the new address we want to use for comparing.
  srdi_(ch2, addr_diff, 2);
  beq(CCR0, L_FinalCheck);           // 2 characters left?
  mtctr(ch2);                        // addr_diff/4
  //16:
  bind(L_InnerLoop);                 // Main work horse (2x unrolled search loop)
  lwz(ch1, 0, addr);                 // Load 2 characters of haystack (ignore alignment).
  lwz(ch2, 2, addr);
  cmpw(CCR0, ch1, n_start);          // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
  cmpw(CCR1, ch2, n_start);
  beq(CCR0, L_Comp1);                // Did we find the needle start?
  beq(CCR1, L_Comp2);
  addi(addr, addr, 4);
  bdnz(L_InnerLoop);
  //24:
  bind(L_FinalCheck);
  rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
  beq(CCR0, L_NotFound);
  lwz(ch1, 0, addr);                 // One position left at which we have to compare.
  cmpw(CCR1, ch1, n_start);
  beq(CCR1, L_Comp3);
  //29:
  bind(L_NotFound);
  li(result, -1);                    // not found
  b(L_End);


  // **************************************************************************************************
  // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
  // **************************************************************************************************
  //31:
  if ((needlecntval>>1) !=1 ) { // Const needlecnt is 2 or 3?  Reduce code size.
    int nopcnt = 5;
    if (needlecntval !=0 ) ++nopcnt; // Balance alignment (other case: see below).
    if (needlecntval == 0) {         // We have to handle these cases separately.
      Label L_OneCharLoop;
      bind(L_TooShort);
      mtctr(haycnt);
      lhz(n_start, 0, needle);       // First character of needle
      bind(L_OneCharLoop);
      lhzu(ch1, 2, addr);
      cmpw(CCR1, ch1, n_start);
      beq(CCR1, L_Found);            // Did we find the one character needle?
      bdnz(L_OneCharLoop);
      li(result, -1);                // Not found.
      b(L_End);
    } // 8 instructions, so no impact on alignment.
    for (int x = 0; x < nopcnt; ++x) nop();
  }

  // **************************************************************************************************
  // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
  // **************************************************************************************************

  // Compare the rest
  //36 if needlecntval==0, else 37:
  bind(L_Comp2);
  addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
  bind(L_Comp1);       // Addr points to possible needle start.
  bind(L_Comp3);       // Could have created a copy and use a different return address but saving code size here.
  if (needlecntval != 2) {             // Const needlecnt==2?
    if (needlecntval != 3) {
      if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
      Register ind_reg = tmp4;
      li(ind_reg, 2*2);                // First 2 characters are already compared, use index 2.
      mtctr(needlecnt);                // Decremented by 2, still > 0.
      //40:
      Label L_CompLoop;
      bind(L_CompLoop);
      lhzx(ch2, needle, ind_reg);
      lhzx(ch1, addr, ind_reg);
      cmpw(CCR1, ch1, ch2);
      bne(CCR1, L_OuterLoop);          // Mismatch: resume outer search.
      addi(ind_reg, ind_reg, 2);
      bdnz(L_CompLoop);
    } else { // No loop required if there's only one needle character left.
      lhz(ch2, 2*2, needle);
      lhz(ch1, 2*2, addr);
      cmpw(CCR1, ch1, ch2);
      bne(CCR1, L_OuterLoop);
    }
  }
  // Return index ...
  //46:
  bind(L_Found);
  subf(addr, haystack, addr); // relative to haystack, ...
  srdi(result, addr, 1);      // in characters.
  //48:
  bind(L_End);
}
goetz@6458 | 2807 | |
// Implementation of Compare for jchar arrays.
//
// Kills the registers str1, str2, cnt1, cnt2.
// Kills cr0, ctr.
// Assumes that result differs from the input registers.
// Note the register aliasing below: limit_reg shares cnt1_reg, chr1_reg
// shares result_reg, chr2_reg shares cnt2_reg, addr_diff shares str2_reg.
// The //<n>: comments count the instructions emitted so far.
void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
                                    Register result_reg, Register tmp_reg) {
  assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);

  Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
  Register cnt_diff = R0,
           limit_reg = cnt1_reg,
           chr1_reg = result_reg,
           chr2_reg = cnt2_reg,
           addr_diff = str2_reg;

  // Offset 0 should be 32 byte aligned.
  //-4:
  dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  //-2:
  // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
  subf(result_reg, cnt2_reg, cnt1_reg);  // difference between cnt1/2
  subf_(addr_diff, str1_reg, str2_reg);  // alias?
  beq(CCR0, Ldone);                      // return cnt difference if both ones are identical
  srawi(limit_reg, result_reg, 31);      // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
  mr(cnt_diff, result_reg);
  andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
  add_(limit_reg, cnt2_reg, limit_reg);   // min(cnt1, cnt2)==0?
  beq(CCR0, Ldone);                       // return cnt difference if one has 0 length

  lhz(chr1_reg, 0, str1_reg);             // optional: early out if first characters mismatch
  lhzx(chr2_reg, str1_reg, addr_diff);    // optional: early out if first characters mismatch
  addi(tmp_reg, limit_reg, -1);           // min(cnt1, cnt2)-1
  subf_(result_reg, chr2_reg, chr1_reg);  // optional: early out if first characters mismatch
  bne(CCR0, Ldone);                       // optional: early out if first characters mismatch

  // Set loop counter by scaling down tmp_reg
  srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
  ble(CCR0, Lslow_case);                    // need >4 characters for fast loop
  andi(limit_reg, tmp_reg, 4-1);            // remaining characters

  // Adapt str1_reg str2_reg for the first loop iteration
  mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
  addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
  //16:
  // Compare the rest of the characters
  bind(Lfast_loop);
  ld(chr1_reg, 0, str1_reg);       // Compare 4 jchars (8 bytes) at a time.
  ldx(chr2_reg, str1_reg, addr_diff);
  cmpd(CCR0, chr2_reg, chr1_reg);
  bne(CCR0, Lslow_case);           // return chr1_reg
  addi(str1_reg, str1_reg, 4*2);
  bdnz(Lfast_loop);
  addi(limit_reg, limit_reg, -4);  // no mismatch found in fast_loop, only 1-4 characters missing
  //23:
  bind(Lslow_case);
  mtctr(limit_reg);
  //24:
  bind(Lslow_loop);                // Character-by-character tail (and mismatch pinpointing).
  lhz(chr1_reg, 0, str1_reg);
  lhzx(chr2_reg, str1_reg, addr_diff);
  subf_(result_reg, chr2_reg, chr1_reg);
  bne(CCR0, Ldone);                // return chr1_reg
  addi(str1_reg, str1_reg, 1*2);
  bdnz(Lslow_loop);
  //30:
  // If strings are equal up to min length, return the length difference.
  mr(result_reg, cnt_diff);
  nop();                           // alignment
  //32:
  // Otherwise, return the difference between the first mismatched chars.
  bind(Ldone);
}
goetz@6458 | 2882 | |
goetz@6458 | 2883 | |
goetz@6458 | 2884 | // Compare char[] arrays. |
goetz@6458 | 2885 | // |
goetz@6458 | 2886 | // str1_reg USE only |
goetz@6458 | 2887 | // str2_reg USE only |
goetz@6458 | 2888 | // cnt_reg USE_DEF, due to tmp reg shortage |
goetz@6458 | 2889 | // result_reg DEF only, might compromise USE only registers |
goetz@6458 | 2890 | void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg, |
goetz@6458 | 2891 | Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg, |
goetz@6458 | 2892 | Register tmp5_reg) { |
goetz@6458 | 2893 | |
goetz@6458 | 2894 | // Str1 may be the same register as str2 which can occur e.g. after scalar replacement. |
goetz@6458 | 2895 | assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg); |
goetz@6458 | 2896 | assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg); |
goetz@6458 | 2897 | |
goetz@6458 | 2898 | // Offset 0 should be 32 byte aligned. |
goetz@6458 | 2899 | Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false; |
goetz@6458 | 2900 | Register index_reg = tmp5_reg; |
goetz@6458 | 2901 | Register cbc_iter = tmp4_reg; |
goetz@6458 | 2902 | |
goetz@6458 | 2903 | //-1: |
goetz@6458 | 2904 | dcbtct(str1_reg, 0x00); // Indicate R/O access to str1. |
goetz@6458 | 2905 | dcbtct(str2_reg, 0x00); // Indicate R/O access to str2. |
goetz@6458 | 2906 | //1: |
goetz@6458 | 2907 | andi(cbc_iter, cnt_reg, 4-1); // Remaining iterations after 4 java characters per iteration loop. |
goetz@6458 | 2908 | li(index_reg, 0); // init |
goetz@6458 | 2909 | li(result_reg, 0); // assume false |
goetz@6458 | 2910 | srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop). |
goetz@6458 | 2911 | |
goetz@6458 | 2912 | cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0) |
goetz@6458 | 2913 | beq(CCR0, Linit_cbc); // too short |
goetz@6458 | 2914 | mtctr(tmp2_reg); |
goetz@6458 | 2915 | //8: |
goetz@6458 | 2916 | bind(Lloop); |
goetz@6458 | 2917 | ldx(tmp1_reg, str1_reg, index_reg); |
goetz@6458 | 2918 | ldx(tmp2_reg, str2_reg, index_reg); |
goetz@6458 | 2919 | cmpd(CCR0, tmp1_reg, tmp2_reg); |
goetz@6458 | 2920 | bne(CCR0, Ldone_false); // Unequal char pair found -> done. |
goetz@6458 | 2921 | addi(index_reg, index_reg, 4*sizeof(jchar)); |
goetz@6458 | 2922 | bdnz(Lloop); |
goetz@6458 | 2923 | //14: |
goetz@6458 | 2924 | bind(Linit_cbc); |
goetz@6458 | 2925 | beq(CCR1, Ldone_true); |
goetz@6458 | 2926 | mtctr(cbc_iter); |
goetz@6458 | 2927 | //16: |
goetz@6458 | 2928 | bind(Lcbc); |
goetz@6458 | 2929 | lhzx(tmp1_reg, str1_reg, index_reg); |
goetz@6458 | 2930 | lhzx(tmp2_reg, str2_reg, index_reg); |
goetz@6458 | 2931 | cmpw(CCR0, tmp1_reg, tmp2_reg); |
goetz@6458 | 2932 | bne(CCR0, Ldone_false); // Unequal char pair found -> done. |
goetz@6458 | 2933 | addi(index_reg, index_reg, 1*sizeof(jchar)); |
goetz@6458 | 2934 | bdnz(Lcbc); |
goetz@6458 | 2935 | nop(); |
goetz@6458 | 2936 | bind(Ldone_true); |
goetz@6458 | 2937 | li(result_reg, 1); |
goetz@6458 | 2938 | //24: |
goetz@6458 | 2939 | bind(Ldone_false); |
goetz@6458 | 2940 | } |
goetz@6458 | 2941 | |
goetz@6458 | 2942 | |
goetz@6458 | 2943 | void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg, |
goetz@6458 | 2944 | Register tmp1_reg, Register tmp2_reg) { |
goetz@6458 | 2945 | // Str1 may be the same register as str2 which can occur e.g. after scalar replacement. |
goetz@6458 | 2946 | assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg); |
goetz@6458 | 2947 | assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg); |
goetz@6458 | 2948 | assert(sizeof(jchar) == 2, "must be"); |
goetz@6458 | 2949 | assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate"); |
goetz@6458 | 2950 | |
goetz@6458 | 2951 | Label Ldone_false; |
goetz@6458 | 2952 | |
goetz@6458 | 2953 | if (cntval < 16) { // short case |
goetz@6458 | 2954 | if (cntval != 0) li(result_reg, 0); // assume false |
goetz@6458 | 2955 | |
goetz@6458 | 2956 | const int num_bytes = cntval*sizeof(jchar); |
goetz@6458 | 2957 | int index = 0; |
goetz@6458 | 2958 | for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) { |
goetz@6458 | 2959 | ld(tmp1_reg, index, str1_reg); |
goetz@6458 | 2960 | ld(tmp2_reg, index, str2_reg); |
goetz@6458 | 2961 | cmpd(CCR0, tmp1_reg, tmp2_reg); |
goetz@6458 | 2962 | bne(CCR0, Ldone_false); |
goetz@6458 | 2963 | } |
goetz@6458 | 2964 | if (cntval & 2) { |
goetz@6458 | 2965 | lwz(tmp1_reg, index, str1_reg); |
goetz@6458 | 2966 | lwz(tmp2_reg, index, str2_reg); |
goetz@6458 | 2967 | cmpw(CCR0, tmp1_reg, tmp2_reg); |
goetz@6458 | 2968 | bne(CCR0, Ldone_false); |
goetz@6458 | 2969 | index += 4; |
goetz@6458 | 2970 | } |
goetz@6458 | 2971 | if (cntval & 1) { |
goetz@6458 | 2972 | lhz(tmp1_reg, index, str1_reg); |
goetz@6458 | 2973 | lhz(tmp2_reg, index, str2_reg); |
goetz@6458 | 2974 | cmpw(CCR0, tmp1_reg, tmp2_reg); |
goetz@6458 | 2975 | bne(CCR0, Ldone_false); |
goetz@6458 | 2976 | } |
goetz@6458 | 2977 | // fallthrough: true |
goetz@6458 | 2978 | } else { |
goetz@6458 | 2979 | Label Lloop; |
goetz@6458 | 2980 | Register index_reg = tmp1_reg; |
goetz@6458 | 2981 | const int loopcnt = cntval/4; |
goetz@6458 | 2982 | assert(loopcnt > 0, "must be"); |
goetz@6458 | 2983 | // Offset 0 should be 32 byte aligned. |
goetz@6458 | 2984 | //2: |
goetz@6458 | 2985 | dcbtct(str1_reg, 0x00); // Indicate R/O access to str1. |
goetz@6458 | 2986 | dcbtct(str2_reg, 0x00); // Indicate R/O access to str2. |
goetz@6458 | 2987 | li(tmp2_reg, loopcnt); |
goetz@6458 | 2988 | li(index_reg, 0); // init |
goetz@6458 | 2989 | li(result_reg, 0); // assume false |
goetz@6458 | 2990 | mtctr(tmp2_reg); |
goetz@6458 | 2991 | //8: |
goetz@6458 | 2992 | bind(Lloop); |
goetz@6458 | 2993 | ldx(R0, str1_reg, index_reg); |
goetz@6458 | 2994 | ldx(tmp2_reg, str2_reg, index_reg); |
goetz@6458 | 2995 | cmpd(CCR0, R0, tmp2_reg); |
goetz@6458 | 2996 | bne(CCR0, Ldone_false); // Unequal char pair found -> done. |
goetz@6458 | 2997 | addi(index_reg, index_reg, 4*sizeof(jchar)); |
goetz@6458 | 2998 | bdnz(Lloop); |
goetz@6458 | 2999 | //14: |
goetz@6458 | 3000 | if (cntval & 2) { |
goetz@6458 | 3001 | lwzx(R0, str1_reg, index_reg); |
goetz@6458 | 3002 | lwzx(tmp2_reg, str2_reg, index_reg); |
goetz@6458 | 3003 | cmpw(CCR0, R0, tmp2_reg); |
goetz@6458 | 3004 | bne(CCR0, Ldone_false); |
goetz@6458 | 3005 | if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar)); |
goetz@6458 | 3006 | } |
goetz@6458 | 3007 | if (cntval & 1) { |
goetz@6458 | 3008 | lhzx(R0, str1_reg, index_reg); |
goetz@6458 | 3009 | lhzx(tmp2_reg, str2_reg, index_reg); |
goetz@6458 | 3010 | cmpw(CCR0, R0, tmp2_reg); |
goetz@6458 | 3011 | bne(CCR0, Ldone_false); |
goetz@6458 | 3012 | } |
goetz@6458 | 3013 | // fallthru: true |
goetz@6458 | 3014 | } |
goetz@6458 | 3015 | li(result_reg, 1); |
goetz@6458 | 3016 | bind(Ldone_false); |
goetz@6458 | 3017 | } |
goetz@6458 | 3018 | |
goetz@6458 | 3019 | |
goetz@6458 | 3020 | void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) { |
goetz@6458 | 3021 | #ifdef ASSERT |
goetz@6458 | 3022 | Label ok; |
goetz@6458 | 3023 | if (check_equal) { |
goetz@6458 | 3024 | beq(CCR0, ok); |
goetz@6458 | 3025 | } else { |
goetz@6458 | 3026 | bne(CCR0, ok); |
goetz@6458 | 3027 | } |
goetz@6458 | 3028 | stop(msg, id); |
goetz@6458 | 3029 | bind(ok); |
goetz@6458 | 3030 | #endif |
goetz@6458 | 3031 | } |
goetz@6458 | 3032 | |
goetz@6458 | 3033 | void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset, |
goetz@6458 | 3034 | Register mem_base, const char* msg, int id) { |
goetz@6458 | 3035 | #ifdef ASSERT |
goetz@6458 | 3036 | switch (size) { |
goetz@6458 | 3037 | case 4: |
goetz@6458 | 3038 | lwz(R0, mem_offset, mem_base); |
goetz@6458 | 3039 | cmpwi(CCR0, R0, 0); |
goetz@6458 | 3040 | break; |
goetz@6458 | 3041 | case 8: |
goetz@6458 | 3042 | ld(R0, mem_offset, mem_base); |
goetz@6458 | 3043 | cmpdi(CCR0, R0, 0); |
goetz@6458 | 3044 | break; |
goetz@6458 | 3045 | default: |
goetz@6458 | 3046 | ShouldNotReachHere(); |
goetz@6458 | 3047 | } |
goetz@6458 | 3048 | asm_assert(check_equal, msg, id); |
goetz@6458 | 3049 | #endif // ASSERT |
goetz@6458 | 3050 | } |
goetz@6458 | 3051 | |
goetz@6458 | 3052 | void MacroAssembler::verify_thread() { |
goetz@6458 | 3053 | if (VerifyThread) { |
goetz@6458 | 3054 | unimplemented("'VerifyThread' currently not implemented on PPC"); |
goetz@6458 | 3055 | } |
goetz@6458 | 3056 | } |
goetz@6458 | 3057 | |
goetz@6458 | 3058 | // READ: oop. KILL: R0. Volatile floats perhaps. |
goetz@6458 | 3059 | void MacroAssembler::verify_oop(Register oop, const char* msg) { |
goetz@6458 | 3060 | if (!VerifyOops) { |
goetz@6458 | 3061 | return; |
goetz@6458 | 3062 | } |
goetz@6495 | 3063 | // Will be preserved. |
goetz@6458 | 3064 | Register tmp = R11; |
goetz@6458 | 3065 | assert(oop != tmp, "precondition"); |
goetz@6458 | 3066 | unsigned int nbytes_save = 10*8; // 10 volatile gprs |
goetz@6495 | 3067 | address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address(); |
goetz@6458 | 3068 | // save tmp |
goetz@6458 | 3069 | mr(R0, tmp); |
goetz@6458 | 3070 | // kill tmp |
goetz@6458 | 3071 | save_LR_CR(tmp); |
goetz@6511 | 3072 | push_frame_reg_args(nbytes_save, tmp); |
goetz@6458 | 3073 | // restore tmp |
goetz@6458 | 3074 | mr(tmp, R0); |
goetz@6458 | 3075 | save_volatile_gprs(R1_SP, 112); // except R0 |
goetz@6511 | 3076 | // load FunctionDescriptor** / entry_address * |
goetz@6458 | 3077 | load_const(tmp, fd); |
goetz@6511 | 3078 | // load FunctionDescriptor* / entry_address |
goetz@6458 | 3079 | ld(tmp, 0, tmp); |
goetz@6458 | 3080 | mr(R4_ARG2, oop); |
goetz@6458 | 3081 | load_const(R3_ARG1, (address)msg); |
goetz@6458 | 3082 | // call destination for its side effect |
goetz@6458 | 3083 | call_c(tmp); |
goetz@6458 | 3084 | restore_volatile_gprs(R1_SP, 112); // except R0 |
goetz@6458 | 3085 | pop_frame(); |
goetz@6458 | 3086 | // save tmp |
goetz@6458 | 3087 | mr(R0, tmp); |
goetz@6458 | 3088 | // kill tmp |
goetz@6458 | 3089 | restore_LR_CR(tmp); |
goetz@6458 | 3090 | // restore tmp |
goetz@6458 | 3091 | mr(tmp, R0); |
goetz@6458 | 3092 | } |
goetz@6458 | 3093 | |
goetz@6458 | 3094 | const char* stop_types[] = { |
goetz@6458 | 3095 | "stop", |
goetz@6458 | 3096 | "untested", |
goetz@6458 | 3097 | "unimplemented", |
goetz@6458 | 3098 | "shouldnotreachhere" |
goetz@6458 | 3099 | }; |
goetz@6458 | 3100 | |
goetz@6458 | 3101 | static void stop_on_request(int tp, const char* msg) { |
goetz@6458 | 3102 | tty->print("PPC assembly code requires stop: (%s) %s\n", (void *)stop_types[tp%/*stop_end*/4], msg); |
goetz@6458 | 3103 | guarantee(false, err_msg("PPC assembly code requires stop: %s", msg)); |
goetz@6458 | 3104 | } |
goetz@6458 | 3105 | |
goetz@6458 | 3106 | // Call a C-function that prints output. |
goetz@6458 | 3107 | void MacroAssembler::stop(int type, const char* msg, int id) { |
goetz@6458 | 3108 | #ifndef PRODUCT |
goetz@6458 | 3109 | block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg)); |
goetz@6458 | 3110 | #else |
goetz@6458 | 3111 | block_comment("stop {"); |
goetz@6458 | 3112 | #endif |
goetz@6458 | 3113 | |
goetz@6458 | 3114 | // setup arguments |
goetz@6458 | 3115 | load_const_optimized(R3_ARG1, type); |
goetz@6458 | 3116 | load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0); |
goetz@6458 | 3117 | call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2); |
goetz@6458 | 3118 | illtrap(); |
goetz@6458 | 3119 | emit_int32(id); |
goetz@6458 | 3120 | block_comment("} stop;"); |
goetz@6458 | 3121 | } |
goetz@6458 | 3122 | |
goetz@6458 | 3123 | #ifndef PRODUCT |
goetz@6458 | 3124 | // Write pattern 0x0101010101010101 in memory region [low-before, high+after]. |
goetz@6458 | 3125 | // Val, addr are temp registers. |
goetz@6458 | 3126 | // If low == addr, addr is killed. |
goetz@6458 | 3127 | // High is preserved. |
goetz@6458 | 3128 | void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) { |
goetz@6458 | 3129 | if (!ZapMemory) return; |
goetz@6458 | 3130 | |
goetz@6458 | 3131 | assert_different_registers(low, val); |
goetz@6458 | 3132 | |
goetz@6458 | 3133 | BLOCK_COMMENT("zap memory region {"); |
goetz@6458 | 3134 | load_const_optimized(val, 0x0101010101010101); |
goetz@6458 | 3135 | int size = before + after; |
goetz@6458 | 3136 | if (low == high && size < 5 && size > 0) { |
goetz@6458 | 3137 | int offset = -before*BytesPerWord; |
goetz@6458 | 3138 | for (int i = 0; i < size; ++i) { |
goetz@6458 | 3139 | std(val, offset, low); |
goetz@6458 | 3140 | offset += (1*BytesPerWord); |
goetz@6458 | 3141 | } |
goetz@6458 | 3142 | } else { |
goetz@6458 | 3143 | addi(addr, low, -before*BytesPerWord); |
goetz@6458 | 3144 | assert_different_registers(high, val); |
goetz@6458 | 3145 | if (after) addi(high, high, after * BytesPerWord); |
goetz@6458 | 3146 | Label loop; |
goetz@6458 | 3147 | bind(loop); |
goetz@6458 | 3148 | std(val, 0, addr); |
goetz@6458 | 3149 | addi(addr, addr, 8); |
goetz@6458 | 3150 | cmpd(CCR6, addr, high); |
goetz@6458 | 3151 | ble(CCR6, loop); |
goetz@6458 | 3152 | if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value. |
goetz@6458 | 3153 | } |
goetz@6458 | 3154 | BLOCK_COMMENT("} zap memory region"); |
goetz@6458 | 3155 | } |
goetz@6458 | 3156 | |
goetz@6458 | 3157 | #endif // !PRODUCT |
goetz@6512 | 3158 | |
goetz@6512 | 3159 | SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() { |
goetz@6512 | 3160 | int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true); |
goetz@6512 | 3161 | assert(sizeof(bool) == 1, "PowerPC ABI"); |
goetz@6512 | 3162 | masm->lbz(temp, simm16_offset, temp); |
goetz@6512 | 3163 | masm->cmpwi(CCR0, temp, 0); |
goetz@6512 | 3164 | masm->beq(CCR0, _label); |
goetz@6512 | 3165 | } |
goetz@6512 | 3166 | |
goetz@6512 | 3167 | SkipIfEqualZero::~SkipIfEqualZero() { |
goetz@6512 | 3168 | _masm->bind(_label); |
goetz@6512 | 3169 | } |