src/cpu/ppc/vm/macroAssembler_ppc.cpp

author:      aoqi
date:        Tue, 17 Oct 2017 12:58:25 +0800
changeset:   7994:04ff2f6cd0eb
parent:      7535:7ae4e26cb1e0
child:       8604:04d83ba48607
permissions: -rw-r--r--
merge changeset
aoqi@0 1 /*
coleenp@7358 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * Copyright 2012, 2014 SAP AG. All rights reserved.
aoqi@0 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 5 *
aoqi@0 6 * This code is free software; you can redistribute it and/or modify it
aoqi@0 7 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 8 * published by the Free Software Foundation.
aoqi@0 9 *
aoqi@0 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 13 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 14 * accompanied this code).
aoqi@0 15 *
aoqi@0 16 * You should have received a copy of the GNU General Public License version
aoqi@0 17 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 19 *
aoqi@0 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 21 * or visit www.oracle.com if you need additional information or have any
aoqi@0 22 * questions.
aoqi@0 23 *
aoqi@0 24 */
aoqi@0 25
aoqi@0 26 #include "precompiled.hpp"
aoqi@0 27 #include "asm/macroAssembler.inline.hpp"
aoqi@0 28 #include "compiler/disassembler.hpp"
aoqi@0 29 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 30 #include "interpreter/interpreter.hpp"
aoqi@0 31 #include "memory/cardTableModRefBS.hpp"
aoqi@0 32 #include "memory/resourceArea.hpp"
aoqi@0 33 #include "prims/methodHandles.hpp"
aoqi@0 34 #include "runtime/biasedLocking.hpp"
aoqi@0 35 #include "runtime/interfaceSupport.hpp"
aoqi@0 36 #include "runtime/objectMonitor.hpp"
aoqi@0 37 #include "runtime/os.hpp"
aoqi@0 38 #include "runtime/sharedRuntime.hpp"
aoqi@0 39 #include "runtime/stubRoutines.hpp"
aoqi@0 40 #include "utilities/macros.hpp"
aoqi@0 41 #if INCLUDE_ALL_GCS
aoqi@0 42 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
aoqi@0 43 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
aoqi@0 44 #include "gc_implementation/g1/heapRegion.hpp"
aoqi@0 45 #endif // INCLUDE_ALL_GCS
aoqi@0 46
aoqi@0 47 #ifdef PRODUCT
aoqi@0 48 #define BLOCK_COMMENT(str) // nothing
aoqi@0 49 #else
aoqi@0 50 #define BLOCK_COMMENT(str) block_comment(str)
aoqi@0 51 #endif
aoqi@0 52
aoqi@0 53 #ifdef ASSERT
aoqi@0 54 // On RISC, there's no benefit to verifying instruction boundaries.
aoqi@0 55 bool AbstractAssembler::pd_check_instruction_mark() { return false; }
aoqi@0 56 #endif
aoqi@0 57
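// Rough sketch of what ld_largeoffset_unchecked() below emits (illustration, not authoritative):
//   si31 fits in 16 bits:  ld d, si31(a)            [optionally followed by a nop]
//   otherwise:             addis d, a, hi16(si31)
//                          ld    d, lo16(si31)(d)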
aoqi@0 58 void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
aoqi@0 59 assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
aoqi@0 60 if (Assembler::is_simm(si31, 16)) {
aoqi@0 61 ld(d, si31, a);
aoqi@0 62 if (emit_filler_nop) nop();
aoqi@0 63 } else {
aoqi@0 64 const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
aoqi@0 65 const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
aoqi@0 66 addis(d, a, hi);
aoqi@0 67 ld(d, lo, d);
aoqi@0 68 }
aoqi@0 69 }
aoqi@0 70
aoqi@0 71 void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
aoqi@0 72 assert_different_registers(d, a);
aoqi@0 73 ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
aoqi@0 74 }
aoqi@0 75
aoqi@0 76 void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
aoqi@0 77 size_t size_in_bytes, bool is_signed) {
aoqi@0 78 switch (size_in_bytes) {
aoqi@0 79 case 8: ld(dst, offs, base); break;
aoqi@0 80 case 4: is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
aoqi@0 81 case 2: is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
aoqi@0 82 case 1: lbz(dst, offs, base); if (is_signed) extsb(dst, dst); break; // lba doesn't exist :(
aoqi@0 83 default: ShouldNotReachHere();
aoqi@0 84 }
aoqi@0 85 }
aoqi@0 86
aoqi@0 87 void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
aoqi@0 88 size_t size_in_bytes) {
aoqi@0 89 switch (size_in_bytes) {
aoqi@0 90 case 8: std(dst, offs, base); break;
aoqi@0 91 case 4: stw(dst, offs, base); break;
aoqi@0 92 case 2: sth(dst, offs, base); break;
aoqi@0 93 case 1: stb(dst, offs, base); break;
aoqi@0 94 default: ShouldNotReachHere();
aoqi@0 95 }
aoqi@0 96 }
aoqi@0 97
aoqi@0 98 void MacroAssembler::align(int modulus, int max, int rem) {
aoqi@0 99 int padding = (rem + modulus - (offset() % modulus)) % modulus;
aoqi@0 100 if (padding > max) return;
aoqi@0 101 for (int c = (padding >> 2); c > 0; --c) { nop(); }
aoqi@0 102 }
aoqi@0 103
aoqi@0 104 // Issue instructions that calculate the given address relative to the global TOC.
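// Typical emitted sequence, assuming R29 holds the global TOC (sketch only):
//   addis dst, R29, offset@ha
//   addi  dst, dst, offset@l
// where offset is the distance of addr from the global TOC.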
aoqi@0 105 void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
aoqi@0 106 bool add_relocation, bool emit_dummy_addr) {
aoqi@0 107 int offset = -1;
aoqi@0 108 if (emit_dummy_addr) {
aoqi@0 109 offset = -128; // dummy address
aoqi@0 110 } else if (addr != (address)(intptr_t)-1) {
aoqi@0 111 offset = MacroAssembler::offset_to_global_toc(addr);
aoqi@0 112 }
aoqi@0 113
aoqi@0 114 if (hi16) {
aoqi@0 115 addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
aoqi@0 116 }
aoqi@0 117 if (lo16) {
aoqi@0 118 if (add_relocation) {
aoqi@0 119 // Relocate at the addi to avoid confusion with a load from the method's TOC.
aoqi@0 120 relocate(internal_word_Relocation::spec(addr));
aoqi@0 121 }
aoqi@0 122 addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
aoqi@0 123 }
aoqi@0 124 }
aoqi@0 125
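// Patcher sketch: 'a' points at the addi of an addis/addi pair as emitted above.
// The code scans backwards (not below 'bound') for the addis that writes the same
// register, then rewrites both 16-bit immediates so the pair yields the new
// address 'addr' relative to the global TOC.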
aoqi@0 126 int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
aoqi@0 127 const int offset = MacroAssembler::offset_to_global_toc(addr);
aoqi@0 128
aoqi@0 129 const address inst2_addr = a;
aoqi@0 130 const int inst2 = *(int *)inst2_addr;
aoqi@0 131
aoqi@0 132 // The relocation points to the second instruction, the addi,
aoqi@0 133 // and the addi reads and writes the same register dst.
aoqi@0 134 const int dst = inv_rt_field(inst2);
aoqi@0 135 assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
aoqi@0 136
aoqi@0 137 // Now, find the preceding addis which writes to dst.
aoqi@0 138 int inst1 = 0;
aoqi@0 139 address inst1_addr = inst2_addr - BytesPerInstWord;
aoqi@0 140 while (inst1_addr >= bound) {
aoqi@0 141 inst1 = *(int *) inst1_addr;
aoqi@0 142 if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
aoqi@0 143 // Stop, found the addis which writes dst.
aoqi@0 144 break;
aoqi@0 145 }
aoqi@0 146 inst1_addr -= BytesPerInstWord;
aoqi@0 147 }
aoqi@0 148
aoqi@0 149 assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
aoqi@0 150 set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
aoqi@0 151 set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
aoqi@0 152 return (int)((intptr_t)addr - (intptr_t)inst1_addr);
aoqi@0 153 }
aoqi@0 154
aoqi@0 155 address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
aoqi@0 156 const address inst2_addr = a;
aoqi@0 157 const int inst2 = *(int *)inst2_addr;
aoqi@0 158
aoqi@0 159 // The relocation points to the second instruction, the addi,
aoqi@0 160 // and the addi reads and writes the same register dst.
aoqi@0 161 const int dst = inv_rt_field(inst2);
aoqi@0 162 assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
aoqi@0 163
aoqi@0 164 // Now, find the preceding addis which writes to dst.
aoqi@0 165 int inst1 = 0;
aoqi@0 166 address inst1_addr = inst2_addr - BytesPerInstWord;
aoqi@0 167 while (inst1_addr >= bound) {
aoqi@0 168 inst1 = *(int *) inst1_addr;
aoqi@0 169 if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
aoqi@0 170 // stop, found the addis which writes dst
aoqi@0 171 break;
aoqi@0 172 }
aoqi@0 173 inst1_addr -= BytesPerInstWord;
aoqi@0 174 }
aoqi@0 175
aoqi@0 176 assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
aoqi@0 177
aoqi@0 178 int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
aoqi@0 179 // -1 is a special case
aoqi@0 180 if (offset == -1) {
aoqi@0 181 return (address)(intptr_t)-1;
aoqi@0 182 } else {
aoqi@0 183 return global_toc() + offset;
aoqi@0 184 }
aoqi@0 185 }
aoqi@0 186
aoqi@0 187 #ifdef _LP64
aoqi@0 188 // Patch compressed oops or klass constants.
aoqi@0 189 // Assembler sequence is
aoqi@0 190 // 1) compressed oops:
aoqi@0 191 // lis rx = const.hi
aoqi@0 192 // ori rx = rx | const.lo
aoqi@0 193 // 2) compressed klass:
aoqi@0 194 // lis rx = const.hi
aoqi@0 195 // clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
aoqi@0 196 // ori rx = rx | const.lo
aoqi@0 197 // An intervening clrldi, if present, is simply skipped over.
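// Example (sketch): patching data == 0x12345678 rewrites the immediates so the
// sequence reads
//   lis rx, 0x1234
//   ori rx, rx, 0x5678
// with an optional clrldi in between left untouched.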
aoqi@0 198 int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
aoqi@0 199 assert(UseCompressedOops, "Should only patch compressed oops");
aoqi@0 200
aoqi@0 201 const address inst2_addr = a;
aoqi@0 202 const int inst2 = *(int *)inst2_addr;
aoqi@0 203
aoqi@0 204 // The relocation points to the second instruction, the ori,
aoqi@0 205 // and the ori reads and writes the same register dst.
aoqi@0 206 const int dst = inv_rta_field(inst2);
aoqi@0 207 assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
aoqi@0 208 // Now, find the preceding addis which writes to dst.
aoqi@0 209 int inst1 = 0;
aoqi@0 210 address inst1_addr = inst2_addr - BytesPerInstWord;
aoqi@0 211 bool inst1_found = false;
aoqi@0 212 while (inst1_addr >= bound) {
aoqi@0 213 inst1 = *(int *)inst1_addr;
aoqi@0 214 if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
aoqi@0 215 inst1_addr -= BytesPerInstWord;
aoqi@0 216 }
aoqi@0 217 assert(inst1_found, "inst is not lis");
aoqi@0 218
aoqi@0 219 int xc = (data >> 16) & 0xffff;
aoqi@0 220 int xd = (data >> 0) & 0xffff;
aoqi@0 221
aoqi@0 222 set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
aoqi@0 223 set_imm((int *)inst2_addr, (xd)); // unsigned int
aoqi@0 224 return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
aoqi@0 225 }
aoqi@0 226
aoqi@0 227 // Get compressed oop or klass constant.
aoqi@0 228 narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
aoqi@0 229 assert(UseCompressedOops, "Should only patch compressed oops");
aoqi@0 230
aoqi@0 231 const address inst2_addr = a;
aoqi@0 232 const int inst2 = *(int *)inst2_addr;
aoqi@0 233
aoqi@0 234 // The relocation points to the second instruction, the ori,
aoqi@0 235 // and the ori reads and writes the same register dst.
aoqi@0 236 const int dst = inv_rta_field(inst2);
aoqi@0 237 assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
aoqi@0 238 // Now, find the preceding lis which writes to dst.
aoqi@0 239 int inst1 = 0;
aoqi@0 240 address inst1_addr = inst2_addr - BytesPerInstWord;
aoqi@0 241 bool inst1_found = false;
aoqi@0 242
aoqi@0 243 while (inst1_addr >= bound) {
aoqi@0 244 inst1 = *(int *) inst1_addr;
aoqi@0 245 if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;}
aoqi@0 246 inst1_addr -= BytesPerInstWord;
aoqi@0 247 }
aoqi@0 248 assert(inst1_found, "inst is not lis");
aoqi@0 249
aoqi@0 250 uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
aoqi@0 251 uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);
aoqi@0 252
aoqi@0 253 return (int) (xl | xh);
aoqi@0 254 }
aoqi@0 255 #endif // _LP64
aoqi@0 256
aoqi@0 257 void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
aoqi@0 258 int toc_offset = 0;
aoqi@0 259 // Use RelocationHolder::none for the constant pool entry, otherwise
aoqi@0 260 // we will end up with a failing NativeCall::verify(x) where x is
aoqi@0 261 // the address of the constant pool entry.
aoqi@0 262 // FIXME: We should insert relocation information for oops at the constant
aoqi@0 263 // pool entries instead of inserting it at the loads; patching of a constant
aoqi@0 264 // pool entry should be less expensive.
aoqi@0 265 address oop_address = address_constant((address)a.value(), RelocationHolder::none);
aoqi@0 266 // Relocate at the pc of the load.
aoqi@0 267 relocate(a.rspec());
aoqi@0 268 toc_offset = (int)(oop_address - code()->consts()->start());
aoqi@0 269 ld_largeoffset_unchecked(dst, toc_offset, toc, true);
aoqi@0 270 }
aoqi@0 271
aoqi@0 272 bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
aoqi@0 273 const address inst1_addr = a;
aoqi@0 274 const int inst1 = *(int *)inst1_addr;
aoqi@0 275
aoqi@0 276 // The relocation points to the ld or the addis.
aoqi@0 277 return (is_ld(inst1)) ||
aoqi@0 278 (is_addis(inst1) && inv_ra_field(inst1) != 0);
aoqi@0 279 }
aoqi@0 280
aoqi@0 281 int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
aoqi@0 282 assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");
aoqi@0 283
aoqi@0 284 const address inst1_addr = a;
aoqi@0 285 const int inst1 = *(int *)inst1_addr;
aoqi@0 286
aoqi@0 287 if (is_ld(inst1)) {
aoqi@0 288 return inv_d1_field(inst1);
aoqi@0 289 } else if (is_addis(inst1)) {
aoqi@0 290 const int dst = inv_rt_field(inst1);
aoqi@0 291
aoqi@0 292 // Now, find the succeeding ld which reads and writes to dst.
aoqi@0 293 address inst2_addr = inst1_addr + BytesPerInstWord;
aoqi@0 294 int inst2 = 0;
aoqi@0 295 while (true) {
aoqi@0 296 inst2 = *(int *) inst2_addr;
aoqi@0 297 if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
aoqi@0 298 // Stop, found the ld which reads and writes dst.
aoqi@0 299 break;
aoqi@0 300 }
aoqi@0 301 inst2_addr += BytesPerInstWord;
aoqi@0 302 }
aoqi@0 303 return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
aoqi@0 304 }
aoqi@0 305 ShouldNotReachHere();
aoqi@0 306 return 0;
aoqi@0 307 }
aoqi@0 308
aoqi@0 309 // Get the constant from a `load_const' sequence.
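// Layout sketch of the 5-word sequence read here (common form, word 1 an ori):
//   word 0: bits 63..48   word 1: bits 47..32   word 2: shift (not read)
//   word 3: bits 31..16   word 4: bits 15..0
// (An alternate form, where word 1 is a lis, uses a different word order; see below.)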
aoqi@0 310 long MacroAssembler::get_const(address a) {
aoqi@0 311 assert(is_load_const_at(a), "not a load of a constant");
aoqi@0 312 const int *p = (const int*) a;
aoqi@0 313 unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
aoqi@0 314 if (is_ori(*(p+1))) {
aoqi@0 315 x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
aoqi@0 316 x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
aoqi@0 317 x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
aoqi@0 318 } else if (is_lis(*(p+1))) {
aoqi@0 319 x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
aoqi@0 320 x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
aoqi@0 321 x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
aoqi@0 322 } else {
aoqi@0 323 ShouldNotReachHere();
aoqi@0 324 return (long) 0;
aoqi@0 325 }
aoqi@0 326 return (long) x;
aoqi@0 327 }
aoqi@0 328
aoqi@0 329 // Patch the 64-bit constant of a `load_const' sequence. This is a
aoqi@0 330 // low-level procedure: it neither flushes the instruction cache nor
aoqi@0 331 // is it MT-safe.
aoqi@0 332 void MacroAssembler::patch_const(address a, long x) {
aoqi@0 333 assert(is_load_const_at(a), "not a load of a constant");
aoqi@0 334 int *p = (int*) a;
aoqi@0 335 if (is_ori(*(p+1))) {
aoqi@0 336 set_imm(0 + p, (x >> 48) & 0xffff);
aoqi@0 337 set_imm(1 + p, (x >> 32) & 0xffff);
aoqi@0 338 set_imm(3 + p, (x >> 16) & 0xffff);
aoqi@0 339 set_imm(4 + p, x & 0xffff);
aoqi@0 340 } else if (is_lis(*(p+1))) {
aoqi@0 341 set_imm(0 + p, (x >> 48) & 0xffff);
aoqi@0 342 set_imm(2 + p, (x >> 32) & 0xffff);
aoqi@0 343 set_imm(1 + p, (x >> 16) & 0xffff);
aoqi@0 344 set_imm(3 + p, x & 0xffff);
aoqi@0 345 } else {
aoqi@0 346 ShouldNotReachHere();
aoqi@0 347 }
aoqi@0 348 }
aoqi@0 349
aoqi@0 350 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
aoqi@0 351 assert(oop_recorder() != NULL, "this assembler needs a Recorder");
aoqi@0 352 int index = oop_recorder()->allocate_metadata_index(obj);
aoqi@0 353 RelocationHolder rspec = metadata_Relocation::spec(index);
aoqi@0 354 return AddressLiteral((address)obj, rspec);
aoqi@0 355 }
aoqi@0 356
aoqi@0 357 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
aoqi@0 358 assert(oop_recorder() != NULL, "this assembler needs a Recorder");
aoqi@0 359 int index = oop_recorder()->find_index(obj);
aoqi@0 360 RelocationHolder rspec = metadata_Relocation::spec(index);
aoqi@0 361 return AddressLiteral((address)obj, rspec);
aoqi@0 362 }
aoqi@0 363
aoqi@0 364 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
aoqi@0 365 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
aoqi@0 366 int oop_index = oop_recorder()->allocate_oop_index(obj);
aoqi@0 367 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
aoqi@0 368 }
aoqi@0 369
aoqi@0 370 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
aoqi@0 371 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
aoqi@0 372 int oop_index = oop_recorder()->find_index(obj);
aoqi@0 373 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
aoqi@0 374 }
aoqi@0 375
aoqi@0 376 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
aoqi@0 377 Register tmp, int offset) {
aoqi@0 378 intptr_t value = *delayed_value_addr;
aoqi@0 379 if (value != 0) {
aoqi@0 380 return RegisterOrConstant(value + offset);
aoqi@0 381 }
aoqi@0 382
aoqi@0 383 // Load indirectly to solve generation ordering problem.
aoqi@0 384 // static address, no relocation
aoqi@0 385 int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
aoqi@0 386 ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)
aoqi@0 387
aoqi@0 388 if (offset != 0) {
aoqi@0 389 addi(tmp, tmp, offset);
aoqi@0 390 }
aoqi@0 391
aoqi@0 392 return RegisterOrConstant(tmp);
aoqi@0 393 }
aoqi@0 394
aoqi@0 395 #ifndef PRODUCT
aoqi@0 396 void MacroAssembler::pd_print_patched_instruction(address branch) {
aoqi@0 397 Unimplemented(); // TODO: PPC port
aoqi@0 398 }
aoqi@0 399 #endif // ndef PRODUCT
aoqi@0 400
aoqi@0 401 // Conditional far branch for destinations encodable in 24+2 bits.
aoqi@0 402 void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {
aoqi@0 403
aoqi@0 404 // If requested by flag optimize, relocate the bc_far as a
aoqi@0 405 // runtime_call and prepare for optimizing it when the code gets
aoqi@0 406 // relocated.
aoqi@0 407 if (optimize == bc_far_optimize_on_relocate) {
aoqi@0 408 relocate(relocInfo::runtime_call_type);
aoqi@0 409 }
aoqi@0 410
aoqi@0 411 // variant 2:
aoqi@0 412 //
aoqi@0 413 // b!cxx SKIP
aoqi@0 414 // bxx DEST
aoqi@0 415 // SKIP:
aoqi@0 416 //
aoqi@0 417
aoqi@0 418 const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
aoqi@0 419 opposite_bcond(inv_boint_bcond(boint)));
aoqi@0 420
aoqi@0 421 // We emit two branches.
aoqi@0 422 // First, a conditional branch which jumps around the far branch.
aoqi@0 423 const address not_taken_pc = pc() + 2 * BytesPerInstWord;
aoqi@0 424 const address bc_pc = pc();
aoqi@0 425 bc(opposite_boint, biint, not_taken_pc);
aoqi@0 426
aoqi@0 427 const int bc_instr = *(int*)bc_pc;
aoqi@0 428 assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
aoqi@0 429 assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
aoqi@0 430 assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
aoqi@0 431 opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
aoqi@0 432 "postcondition");
aoqi@0 433 assert(biint == inv_bi_field(bc_instr), "postcondition");
aoqi@0 434
aoqi@0 435 // Second, an unconditional far branch which jumps to dest.
aoqi@0 436 // Note: target(dest) remembers the current pc (see CodeSection::target)
aoqi@0 437 // and returns the current pc if the label is not bound yet; when
aoqi@0 438 // the label gets bound, the unconditional far branch will be patched.
aoqi@0 439 const address target_pc = target(dest);
aoqi@0 440 const address b_pc = pc();
aoqi@0 441 b(target_pc);
aoqi@0 442
aoqi@0 443 assert(not_taken_pc == pc(), "postcondition");
aoqi@0 444 assert(dest.is_bound() || target_pc == b_pc, "postcondition");
aoqi@0 445 }
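
// Hypothetical usage sketch of bc_far() above (operand helpers assumed, not verified here):
//   Label L_done;
//   bc_far(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal),
//          L_done, MacroAssembler::bc_far_optimize_on_relocate);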
aoqi@0 446
aoqi@0 447 bool MacroAssembler::is_bc_far_at(address instruction_addr) {
aoqi@0 448 return is_bc_far_variant1_at(instruction_addr) ||
aoqi@0 449 is_bc_far_variant2_at(instruction_addr) ||
aoqi@0 450 is_bc_far_variant3_at(instruction_addr);
aoqi@0 451 }
aoqi@0 452
aoqi@0 453 address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
aoqi@0 454 if (is_bc_far_variant1_at(instruction_addr)) {
aoqi@0 455 const address instruction_1_addr = instruction_addr;
aoqi@0 456 const int instruction_1 = *(int*)instruction_1_addr;
aoqi@0 457 return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
aoqi@0 458 } else if (is_bc_far_variant2_at(instruction_addr)) {
aoqi@0 459 const address instruction_2_addr = instruction_addr + 4;
aoqi@0 460 return bxx_destination(instruction_2_addr);
aoqi@0 461 } else if (is_bc_far_variant3_at(instruction_addr)) {
aoqi@0 462 return instruction_addr + 8;
aoqi@0 463 }
aoqi@0 464 // variant 4 ???
aoqi@0 465 ShouldNotReachHere();
aoqi@0 466 return NULL;
aoqi@0 467 }
aoqi@0 468 void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {
aoqi@0 469
aoqi@0 470 if (is_bc_far_variant3_at(instruction_addr)) {
aoqi@0 471 // variant 3, far cond branch to the next instruction, already patched to nops:
aoqi@0 472 //
aoqi@0 473 // nop
aoqi@0 474 // endgroup
aoqi@0 475 // SKIP/DEST:
aoqi@0 476 //
aoqi@0 477 return;
aoqi@0 478 }
aoqi@0 479
aoqi@0 480 // first, extract boint and biint from the current branch
aoqi@0 481 int boint = 0;
aoqi@0 482 int biint = 0;
aoqi@0 483
aoqi@0 484 ResourceMark rm;
aoqi@0 485 const int code_size = 2 * BytesPerInstWord;
aoqi@0 486 CodeBuffer buf(instruction_addr, code_size);
aoqi@0 487 MacroAssembler masm(&buf);
aoqi@0 488 if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
aoqi@0 489 // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
aoqi@0 490 masm.nop();
aoqi@0 491 masm.endgroup();
aoqi@0 492 } else {
aoqi@0 493 if (is_bc_far_variant1_at(instruction_addr)) {
aoqi@0 494 // variant 1, the 1st instruction contains the destination address:
aoqi@0 495 //
aoqi@0 496 // bcxx DEST
aoqi@0 497 // endgroup
aoqi@0 498 //
aoqi@0 499 const int instruction_1 = *(int*)(instruction_addr);
aoqi@0 500 boint = inv_bo_field(instruction_1);
aoqi@0 501 biint = inv_bi_field(instruction_1);
aoqi@0 502 } else if (is_bc_far_variant2_at(instruction_addr)) {
aoqi@0 503 // variant 2, the 2nd instruction contains the destination address:
aoqi@0 504 //
aoqi@0 505 // b!cxx SKIP
aoqi@0 506 // bxx DEST
aoqi@0 507 // SKIP:
aoqi@0 508 //
aoqi@0 509 const int instruction_1 = *(int*)(instruction_addr);
aoqi@0 510 boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
aoqi@0 511 opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
aoqi@0 512 biint = inv_bi_field(instruction_1);
aoqi@0 513 } else {
aoqi@0 514 // variant 4???
aoqi@0 515 ShouldNotReachHere();
aoqi@0 516 }
aoqi@0 517
aoqi@0 518 // second, set the new branch destination and optimize the code
aoqi@0 519 if (dest != instruction_addr + 4 && // the bc_far is still unbound!
aoqi@0 520 masm.is_within_range_of_bcxx(dest, instruction_addr)) {
aoqi@0 521 // variant 1:
aoqi@0 522 //
aoqi@0 523 // bcxx DEST
aoqi@0 524 // endgroup
aoqi@0 525 //
aoqi@0 526 masm.bc(boint, biint, dest);
aoqi@0 527 masm.endgroup();
aoqi@0 528 } else {
aoqi@0 529 // variant 2:
aoqi@0 530 //
aoqi@0 531 // b!cxx SKIP
aoqi@0 532 // bxx DEST
aoqi@0 533 // SKIP:
aoqi@0 534 //
aoqi@0 535 const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
aoqi@0 536 opposite_bcond(inv_boint_bcond(boint)));
aoqi@0 537 const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
aoqi@0 538 masm.bc(opposite_boint, biint, not_taken_pc);
aoqi@0 539 masm.b(dest);
aoqi@0 540 }
aoqi@0 541 }
aoqi@0 542 ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
aoqi@0 543 }
aoqi@0 544
aoqi@0 545 // Emit a patchable (but NOT MT-safe) 64-bit absolute call/jump.
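// Pattern sketch (7 instructions, 28 bytes, in either case):
//   variant 2 (pc-relative):           6 x nop ; bl dest     (or: b dest ; 6 x nop)
//   variant 1b (toc-relative, ELFv2):  nop ; addis/addi from global TOC ; mtctr R12 ; nop ; nop ; bctr[l]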
aoqi@0 546 void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
aoqi@0 547 // get current pc
aoqi@0 548 uint64_t start_pc = (uint64_t) pc();
aoqi@0 549
aoqi@0 550 const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
aoqi@0 551 const address pc_of_b = (address) (start_pc + (0*BytesPerInstWord)); // b is first
aoqi@0 552
aoqi@0 553 // relocate here
aoqi@0 554 if (rt != relocInfo::none) {
aoqi@0 555 relocate(rt);
aoqi@0 556 }
aoqi@0 557
aoqi@0 558 if ( ReoptimizeCallSequences &&
aoqi@0 559 (( link && is_within_range_of_b(dest, pc_of_bl)) ||
aoqi@0 560 (!link && is_within_range_of_b(dest, pc_of_b)))) {
aoqi@0 561 // variant 2:
aoqi@0 562 // Emit an optimized, pc-relative call/jump.
aoqi@0 563
aoqi@0 564 if (link) {
aoqi@0 565 // some padding
aoqi@0 566 nop();
aoqi@0 567 nop();
aoqi@0 568 nop();
aoqi@0 569 nop();
aoqi@0 570 nop();
aoqi@0 571 nop();
aoqi@0 572
aoqi@0 573 // do the call
aoqi@0 574 assert(pc() == pc_of_bl, "just checking");
aoqi@0 575 bl(dest, relocInfo::none);
aoqi@0 576 } else {
aoqi@0 577 // do the jump
aoqi@0 578 assert(pc() == pc_of_b, "just checking");
aoqi@0 579 b(dest, relocInfo::none);
aoqi@0 580
aoqi@0 581 // some padding
aoqi@0 582 nop();
aoqi@0 583 nop();
aoqi@0 584 nop();
aoqi@0 585 nop();
aoqi@0 586 nop();
aoqi@0 587 nop();
aoqi@0 588 }
aoqi@0 589
aoqi@0 590 // Assert that we can identify the emitted call/jump.
aoqi@0 591 assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
aoqi@0 592 "can't identify emitted call");
aoqi@0 593 } else {
aoqi@0 594 // variant 1:
aoqi@0 595 #if defined(ABI_ELFv2)
aoqi@0 596 nop();
aoqi@0 597 calculate_address_from_global_toc(R12, dest, true, true, false);
aoqi@0 598 mtctr(R12);
aoqi@0 599 nop();
aoqi@0 600 nop();
aoqi@0 601 #else
aoqi@0 602 mr(R0, R11); // spill R11 -> R0.
aoqi@0 603
aoqi@0 604 // Load the destination address into CTR,
aoqi@0 605 // calculate destination relative to global toc.
aoqi@0 606 calculate_address_from_global_toc(R11, dest, true, true, false);
aoqi@0 607
aoqi@0 608 mtctr(R11);
aoqi@0 609 mr(R11, R0); // spill R11 <- R0.
aoqi@0 610 nop();
aoqi@0 611 #endif
aoqi@0 612
aoqi@0 613 // do the call/jump
aoqi@0 614 if (link) {
aoqi@0 615 bctrl();
aoqi@0 616 } else {
aoqi@0 617 bctr();
aoqi@0 618 }
aoqi@0 619 // Assert that we can identify the emitted call/jump.
aoqi@0 620 assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
aoqi@0 621 "can't identify emitted call");
aoqi@0 622 }
aoqi@0 623
aoqi@0 624 // Assert that we can identify the emitted call/jump.
aoqi@0 625 assert(is_bxx64_patchable_at((address)start_pc, link),
aoqi@0 626 "can't identify emitted call");
aoqi@0 627 assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
aoqi@0 628 "wrong encoding of dest address");
aoqi@0 629 }
aoqi@0 630
aoqi@0 631 // Identify a bxx64_patchable instruction.
aoqi@0 632 bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
aoqi@0 633 return is_bxx64_patchable_variant1b_at(instruction_addr, link)
aoqi@0 634 //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
aoqi@0 635 || is_bxx64_patchable_variant2_at(instruction_addr, link);
aoqi@0 636 }
aoqi@0 637
aoqi@0 638 // Does the call64_patchable instruction use a pc-relative encoding of
aoqi@0 639 // the call destination?
aoqi@0 640 bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
aoqi@0 641 // variant 2 is pc-relative
aoqi@0 642 return is_bxx64_patchable_variant2_at(instruction_addr, link);
aoqi@0 643 }
aoqi@0 644
aoqi@0 645 // Identify variant 1.
aoqi@0 646 bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
aoqi@0 647 unsigned int* instr = (unsigned int*) instruction_addr;
aoqi@0 648 return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
aoqi@0 649 && is_mtctr(instr[5]) // mtctr
aoqi@0 650 && is_load_const_at(instruction_addr);
aoqi@0 651 }
aoqi@0 652
aoqi@0 653 // Identify variant 1b: load destination relative to global toc.
aoqi@0 654 bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
aoqi@0 655 unsigned int* instr = (unsigned int*) instruction_addr;
aoqi@0 656 return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
aoqi@0 657 && is_mtctr(instr[3]) // mtctr
aoqi@0 658 && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
aoqi@0 659 }
aoqi@0 660
aoqi@0 661 // Identify variant 2.
aoqi@0 662 bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
aoqi@0 663 unsigned int* instr = (unsigned int*) instruction_addr;
aoqi@0 664 if (link) {
aoqi@0 665 return is_bl (instr[6]) // bl dest is last
aoqi@0 666 && is_nop(instr[0]) // nop
aoqi@0 667 && is_nop(instr[1]) // nop
aoqi@0 668 && is_nop(instr[2]) // nop
aoqi@0 669 && is_nop(instr[3]) // nop
aoqi@0 670 && is_nop(instr[4]) // nop
aoqi@0 671 && is_nop(instr[5]); // nop
aoqi@0 672 } else {
aoqi@0 673 return is_b (instr[0]) // b dest is first
aoqi@0 674 && is_nop(instr[1]) // nop
aoqi@0 675 && is_nop(instr[2]) // nop
aoqi@0 676 && is_nop(instr[3]) // nop
aoqi@0 677 && is_nop(instr[4]) // nop
aoqi@0 678 && is_nop(instr[5]) // nop
aoqi@0 679 && is_nop(instr[6]); // nop
aoqi@0 680 }
aoqi@0 681 }
aoqi@0 682
aoqi@0 683 // Set dest address of a bxx64_patchable instruction.
aoqi@0 684 void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
aoqi@0 685 ResourceMark rm;
aoqi@0 686 int code_size = MacroAssembler::bxx64_patchable_size;
aoqi@0 687 CodeBuffer buf(instruction_addr, code_size);
aoqi@0 688 MacroAssembler masm(&buf);
aoqi@0 689 masm.bxx64_patchable(dest, relocInfo::none, link);
aoqi@0 690 ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
aoqi@0 691 }
aoqi@0 692
aoqi@0 693 // Get dest address of a bxx64_patchable instruction.
aoqi@0 694 address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
aoqi@0 695 if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
aoqi@0 696 return (address) (unsigned long) get_const(instruction_addr);
aoqi@0 697 } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
aoqi@0 698 unsigned int* instr = (unsigned int*) instruction_addr;
aoqi@0 699 if (link) {
aoqi@0 700 const int instr_idx = 6; // bl is last
aoqi@0 701 int branchoffset = branch_destination(instr[instr_idx], 0);
aoqi@0 702 return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
aoqi@0 703 } else {
aoqi@0 704 const int instr_idx = 0; // b is first
aoqi@0 705 int branchoffset = branch_destination(instr[instr_idx], 0);
aoqi@0 706 return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
aoqi@0 707 }
aoqi@0 708 // Load dest relative to global toc.
aoqi@0 709 } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
aoqi@0 710 return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
aoqi@0 711 instruction_addr);
aoqi@0 712 } else {
aoqi@0 713 ShouldNotReachHere();
aoqi@0 714 return NULL;
aoqi@0 715 }
aoqi@0 716 }
aoqi@0 717
aoqi@0 718 // Uses ordering which corresponds to ABI:
aoqi@0 719 // _savegpr0_14: std r14,-144(r1)
aoqi@0 720 // _savegpr0_15: std r15,-136(r1)
aoqi@0 721 // _savegpr0_16: std r16,-128(r1)
aoqi@0 722 void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
aoqi@0 723 std(R14, offset, dst); offset += 8;
aoqi@0 724 std(R15, offset, dst); offset += 8;
aoqi@0 725 std(R16, offset, dst); offset += 8;
aoqi@0 726 std(R17, offset, dst); offset += 8;
aoqi@0 727 std(R18, offset, dst); offset += 8;
aoqi@0 728 std(R19, offset, dst); offset += 8;
aoqi@0 729 std(R20, offset, dst); offset += 8;
aoqi@0 730 std(R21, offset, dst); offset += 8;
aoqi@0 731 std(R22, offset, dst); offset += 8;
aoqi@0 732 std(R23, offset, dst); offset += 8;
aoqi@0 733 std(R24, offset, dst); offset += 8;
aoqi@0 734 std(R25, offset, dst); offset += 8;
aoqi@0 735 std(R26, offset, dst); offset += 8;
aoqi@0 736 std(R27, offset, dst); offset += 8;
aoqi@0 737 std(R28, offset, dst); offset += 8;
aoqi@0 738 std(R29, offset, dst); offset += 8;
aoqi@0 739 std(R30, offset, dst); offset += 8;
aoqi@0 740 std(R31, offset, dst); offset += 8;
aoqi@0 741
aoqi@0 742 stfd(F14, offset, dst); offset += 8;
aoqi@0 743 stfd(F15, offset, dst); offset += 8;
aoqi@0 744 stfd(F16, offset, dst); offset += 8;
aoqi@0 745 stfd(F17, offset, dst); offset += 8;
aoqi@0 746 stfd(F18, offset, dst); offset += 8;
aoqi@0 747 stfd(F19, offset, dst); offset += 8;
aoqi@0 748 stfd(F20, offset, dst); offset += 8;
aoqi@0 749 stfd(F21, offset, dst); offset += 8;
aoqi@0 750 stfd(F22, offset, dst); offset += 8;
aoqi@0 751 stfd(F23, offset, dst); offset += 8;
aoqi@0 752 stfd(F24, offset, dst); offset += 8;
aoqi@0 753 stfd(F25, offset, dst); offset += 8;
aoqi@0 754 stfd(F26, offset, dst); offset += 8;
aoqi@0 755 stfd(F27, offset, dst); offset += 8;
aoqi@0 756 stfd(F28, offset, dst); offset += 8;
aoqi@0 757 stfd(F29, offset, dst); offset += 8;
aoqi@0 758 stfd(F30, offset, dst); offset += 8;
aoqi@0 759 stfd(F31, offset, dst);
aoqi@0 760 }
aoqi@0 761
aoqi@0 762 // Uses ordering which corresponds to ABI:
aoqi@0 763 // _restgpr0_14: ld r14,-144(r1)
aoqi@0 764 // _restgpr0_15: ld r15,-136(r1)
aoqi@0 765 // _restgpr0_16: ld r16,-128(r1)
aoqi@0 766 void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
aoqi@0 767 ld(R14, offset, src); offset += 8;
aoqi@0 768 ld(R15, offset, src); offset += 8;
aoqi@0 769 ld(R16, offset, src); offset += 8;
aoqi@0 770 ld(R17, offset, src); offset += 8;
aoqi@0 771 ld(R18, offset, src); offset += 8;
aoqi@0 772 ld(R19, offset, src); offset += 8;
aoqi@0 773 ld(R20, offset, src); offset += 8;
aoqi@0 774 ld(R21, offset, src); offset += 8;
aoqi@0 775 ld(R22, offset, src); offset += 8;
aoqi@0 776 ld(R23, offset, src); offset += 8;
aoqi@0 777 ld(R24, offset, src); offset += 8;
aoqi@0 778 ld(R25, offset, src); offset += 8;
aoqi@0 779 ld(R26, offset, src); offset += 8;
aoqi@0 780 ld(R27, offset, src); offset += 8;
aoqi@0 781 ld(R28, offset, src); offset += 8;
aoqi@0 782 ld(R29, offset, src); offset += 8;
aoqi@0 783 ld(R30, offset, src); offset += 8;
aoqi@0 784 ld(R31, offset, src); offset += 8;
aoqi@0 785
aoqi@0 786 // FP registers
aoqi@0 787 lfd(F14, offset, src); offset += 8;
aoqi@0 788 lfd(F15, offset, src); offset += 8;
aoqi@0 789 lfd(F16, offset, src); offset += 8;
aoqi@0 790 lfd(F17, offset, src); offset += 8;
aoqi@0 791 lfd(F18, offset, src); offset += 8;
aoqi@0 792 lfd(F19, offset, src); offset += 8;
aoqi@0 793 lfd(F20, offset, src); offset += 8;
aoqi@0 794 lfd(F21, offset, src); offset += 8;
aoqi@0 795 lfd(F22, offset, src); offset += 8;
aoqi@0 796 lfd(F23, offset, src); offset += 8;
aoqi@0 797 lfd(F24, offset, src); offset += 8;
aoqi@0 798 lfd(F25, offset, src); offset += 8;
aoqi@0 799 lfd(F26, offset, src); offset += 8;
aoqi@0 800 lfd(F27, offset, src); offset += 8;
aoqi@0 801 lfd(F28, offset, src); offset += 8;
aoqi@0 802 lfd(F29, offset, src); offset += 8;
aoqi@0 803 lfd(F30, offset, src); offset += 8;
aoqi@0 804 lfd(F31, offset, src);
aoqi@0 805 }
aoqi@0 806
aoqi@0 807 // For verify_oops.
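// Spills R2 and the argument/scratch registers R3..R12 (11 slots, 88 bytes starting at 'offset').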
aoqi@0 808 void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
goetz@7424 809 std(R2, offset, dst); offset += 8;
aoqi@0 810 std(R3, offset, dst); offset += 8;
aoqi@0 811 std(R4, offset, dst); offset += 8;
aoqi@0 812 std(R5, offset, dst); offset += 8;
aoqi@0 813 std(R6, offset, dst); offset += 8;
aoqi@0 814 std(R7, offset, dst); offset += 8;
aoqi@0 815 std(R8, offset, dst); offset += 8;
aoqi@0 816 std(R9, offset, dst); offset += 8;
aoqi@0 817 std(R10, offset, dst); offset += 8;
aoqi@0 818 std(R11, offset, dst); offset += 8;
aoqi@0 819 std(R12, offset, dst);
aoqi@0 820 }
aoqi@0 821
aoqi@0 822 // For verify_oops.
aoqi@0 823 void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
goetz@7424 824 ld(R2, offset, src); offset += 8;
aoqi@0 825 ld(R3, offset, src); offset += 8;
aoqi@0 826 ld(R4, offset, src); offset += 8;
aoqi@0 827 ld(R5, offset, src); offset += 8;
aoqi@0 828 ld(R6, offset, src); offset += 8;
aoqi@0 829 ld(R7, offset, src); offset += 8;
aoqi@0 830 ld(R8, offset, src); offset += 8;
aoqi@0 831 ld(R9, offset, src); offset += 8;
aoqi@0 832 ld(R10, offset, src); offset += 8;
aoqi@0 833 ld(R11, offset, src); offset += 8;
aoqi@0 834 ld(R12, offset, src);
aoqi@0 835 }
aoqi@0 836
aoqi@0 837 void MacroAssembler::save_LR_CR(Register tmp) {
aoqi@0 838 mfcr(tmp);
aoqi@0 839 std(tmp, _abi(cr), R1_SP);
aoqi@0 840 mflr(tmp);
aoqi@0 841 std(tmp, _abi(lr), R1_SP);
aoqi@0 842 // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
aoqi@0 843 }
aoqi@0 844
aoqi@0 845 void MacroAssembler::restore_LR_CR(Register tmp) {
aoqi@0 846 assert(tmp != R1_SP, "must be distinct");
aoqi@0 847 ld(tmp, _abi(lr), R1_SP);
aoqi@0 848 mtlr(tmp);
aoqi@0 849 ld(tmp, _abi(cr), R1_SP);
aoqi@0 850 mtcr(tmp);
aoqi@0 851 }
aoqi@0 852
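// Sketch: the bl to the immediately following label deposits the address of the
// next instruction (== lr_pc) in LR, which is then copied into 'result'.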
aoqi@0 853 address MacroAssembler::get_PC_trash_LR(Register result) {
aoqi@0 854 Label L;
aoqi@0 855 bl(L);
aoqi@0 856 bind(L);
aoqi@0 857 address lr_pc = pc();
aoqi@0 858 mflr(result);
aoqi@0 859 return lr_pc;
aoqi@0 860 }
aoqi@0 861
aoqi@0 862 void MacroAssembler::resize_frame(Register offset, Register tmp) {
aoqi@0 863 #ifdef ASSERT
aoqi@0 864 assert_different_registers(offset, tmp, R1_SP);
aoqi@0 865 andi_(tmp, offset, frame::alignment_in_bytes-1);
aoqi@0 866 asm_assert_eq("resize_frame: unaligned", 0x204);
aoqi@0 867 #endif
aoqi@0 868
aoqi@0 869 // tmp <- *(SP)
aoqi@0 870 ld(tmp, _abi(callers_sp), R1_SP);
aoqi@0 871 // addr <- SP + offset;
aoqi@0 872 // *(addr) <- tmp;
aoqi@0 873 // SP <- addr
aoqi@0 874 stdux(tmp, R1_SP, offset);
aoqi@0 875 }
aoqi@0 876
aoqi@0 877 void MacroAssembler::resize_frame(int offset, Register tmp) {
aoqi@0 878 assert(is_simm(offset, 16), "too big an offset");
aoqi@0 879 assert_different_registers(tmp, R1_SP);
aoqi@0 880 assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
aoqi@0 881 // tmp <- *(SP)
aoqi@0 882 ld(tmp, _abi(callers_sp), R1_SP);
aoqi@0 883 // addr <- SP + offset;
aoqi@0 884 // *(addr) <- tmp;
aoqi@0 885 // SP <- addr
aoqi@0 886 stdu(tmp, offset, R1_SP);
aoqi@0 887 }
aoqi@0 888
aoqi@0 889 void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
aoqi@0 890 // (addr == tmp1) || (addr == tmp2) is allowed here!
aoqi@0 891 assert(tmp1 != tmp2, "must be distinct");
aoqi@0 892
aoqi@0 893 // compute offset w.r.t. current stack pointer
aoqi@0 894 // tmp_1 <- addr - SP (!)
aoqi@0 895 subf(tmp1, R1_SP, addr);
aoqi@0 896
aoqi@0 897 // atomically update SP keeping back link.
aoqi@0 898 resize_frame(tmp1/* offset */, tmp2/* tmp */);
aoqi@0 899 }
aoqi@0 900
aoqi@0 901 void MacroAssembler::push_frame(Register bytes, Register tmp) {
aoqi@0 902 #ifdef ASSERT
aoqi@0 903 assert(bytes != R0, "r0 not allowed here");
aoqi@0 904 andi_(R0, bytes, frame::alignment_in_bytes-1);
aoqi@0 905 asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
aoqi@0 906 #endif
aoqi@0 907 neg(tmp, bytes);
aoqi@0 908 stdux(R1_SP, R1_SP, tmp);
aoqi@0 909 }
aoqi@0 910
aoqi@0 911 // Push a frame of size `bytes'.
aoqi@0 912 void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
aoqi@0 913 long offset = align_addr(bytes, frame::alignment_in_bytes);
aoqi@0 914 if (is_simm(-offset, 16)) {
aoqi@0 915 stdu(R1_SP, -offset, R1_SP);
aoqi@0 916 } else {
aoqi@0 917 load_const(tmp, -offset);
aoqi@0 918 stdux(R1_SP, R1_SP, tmp);
aoqi@0 919 }
aoqi@0 920 }
aoqi@0 921
aoqi@0 922 // Push a frame of size `bytes' plus abi_reg_args on top.
aoqi@0 923 void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
aoqi@0 924 push_frame(bytes + frame::abi_reg_args_size, tmp);
aoqi@0 925 }
aoqi@0 926
aoqi@0 927 // Set up a new C frame with a spill area for non-volatile GPRs and
aoqi@0 928 // additional space for local variables.
aoqi@0 929 void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
aoqi@0 930 Register tmp) {
aoqi@0 931 push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
aoqi@0 932 }
aoqi@0 933
aoqi@0 934 // Pop current C frame.
aoqi@0 935 void MacroAssembler::pop_frame() {
aoqi@0 936 ld(R1_SP, _abi(callers_sp), R1_SP);
aoqi@0 937 }
aoqi@0 938
aoqi@0 939 #if defined(ABI_ELFv2)
aoqi@0 940 address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
aoqi@0 941 // TODO(asmundak): make sure the caller uses R12 as function descriptor
aoqi@0 942 // most of the time.
aoqi@0 943 if (R12 != r_function_entry) {
aoqi@0 944 mr(R12, r_function_entry);
aoqi@0 945 }
aoqi@0 946 mtctr(R12);
aoqi@0 947 // Do a call or a branch.
aoqi@0 948 if (and_link) {
aoqi@0 949 bctrl();
aoqi@0 950 } else {
aoqi@0 951 bctr();
aoqi@0 952 }
aoqi@0 953 _last_calls_return_pc = pc();
aoqi@0 954
aoqi@0 955 return _last_calls_return_pc;
aoqi@0 956 }
aoqi@0 957
aoqi@0 958 // Call a C function via a function descriptor and use full C
aoqi@0 959 // calling conventions. Updates and returns _last_calls_return_pc.
aoqi@0 960 address MacroAssembler::call_c(Register r_function_entry) {
aoqi@0 961 return branch_to(r_function_entry, /*and_link=*/true);
aoqi@0 962 }
aoqi@0 963
aoqi@0 964 // For tail calls: only branch, don't link, so callee returns to caller of this function.
aoqi@0 965 address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
aoqi@0 966 return branch_to(r_function_entry, /*and_link=*/false);
aoqi@0 967 }
aoqi@0 968
aoqi@0 969 address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
aoqi@0 970 load_const(R12, function_entry, R0);
aoqi@0 971 return branch_to(R12, /*and_link=*/true);
aoqi@0 972 }
aoqi@0 973
aoqi@0 974 #else
aoqi@0 975 // Generic version of a call to C function via a function descriptor
aoqi@0 976 // with variable support for C calling conventions (TOC, ENV, etc.).
aoqi@0 977 // Updates and returns _last_calls_return_pc.
aoqi@0 978 address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
aoqi@0 979 bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
aoqi@0 980 // we emit standard ptrgl glue code here
aoqi@0 981 assert((function_descriptor != R0), "function_descriptor cannot be R0");
aoqi@0 982
aoqi@0 983 // retrieve necessary entries from the function descriptor
aoqi@0 984 ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
aoqi@0 985 mtctr(R0);
aoqi@0 986
aoqi@0 987 if (load_toc_of_callee) {
aoqi@0 988 ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
aoqi@0 989 }
aoqi@0 990 if (load_env_of_callee) {
aoqi@0 991 ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
aoqi@0 992 } else if (load_toc_of_callee) {
aoqi@0 993 li(R11, 0);
aoqi@0 994 }
aoqi@0 995
aoqi@0 996 // do a call or a branch
aoqi@0 997 if (and_link) {
aoqi@0 998 bctrl();
aoqi@0 999 } else {
aoqi@0 1000 bctr();
aoqi@0 1001 }
aoqi@0 1002 _last_calls_return_pc = pc();
aoqi@0 1003
aoqi@0 1004 return _last_calls_return_pc;
aoqi@0 1005 }
aoqi@0 1006
aoqi@0 1007 // Call a C function via a function descriptor and use full C calling
aoqi@0 1008 // conventions.
aoqi@0 1009 // We don't use the TOC in generated code, so there is no need to save
aoqi@0 1010 // and restore its value.
aoqi@0 1011 address MacroAssembler::call_c(Register fd) {
aoqi@0 1012 return branch_to(fd, /*and_link=*/true,
aoqi@0 1013 /*save toc=*/false,
aoqi@0 1014 /*restore toc=*/false,
aoqi@0 1015 /*load toc=*/true,
aoqi@0 1016 /*load env=*/true);
aoqi@0 1017 }
aoqi@0 1018
aoqi@0 1019 address MacroAssembler::call_c_and_return_to_caller(Register fd) {
aoqi@0 1020 return branch_to(fd, /*and_link=*/false,
aoqi@0 1021 /*save toc=*/false,
aoqi@0 1022 /*restore toc=*/false,
aoqi@0 1023 /*load toc=*/true,
aoqi@0 1024 /*load env=*/true);
aoqi@0 1025 }
aoqi@0 1026
aoqi@0 1027 address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
aoqi@0 1028 if (rt != relocInfo::none) {
aoqi@0 1029 // this call needs to be relocatable
aoqi@0 1030 if (!ReoptimizeCallSequences
aoqi@0 1031 || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
aoqi@0 1032 || fd == NULL // support code-size estimation
aoqi@0 1033 || !fd->is_friend_function()
aoqi@0 1034 || fd->entry() == NULL) {
aoqi@0 1035 // it's not a friend function as defined by class FunctionDescriptor,
aoqi@0 1036 // so do a full call-c here.
aoqi@0 1037 load_const(R11, (address)fd, R0);
aoqi@0 1038
aoqi@0 1039 bool has_env = (fd != NULL && fd->env() != NULL);
aoqi@0 1040 return branch_to(R11, /*and_link=*/true,
aoqi@0 1041 /*save toc=*/false,
aoqi@0 1042 /*restore toc=*/false,
aoqi@0 1043 /*load toc=*/true,
aoqi@0 1044 /*load env=*/has_env);
aoqi@0 1045 } else {
aoqi@0 1046 // It's a friend function. Load the entry point and don't care about
aoqi@0 1047 // toc and env. Use an optimizable call instruction, but ensure the
aoqi@0 1048 // same code-size as in the case of a non-friend function.
aoqi@0 1049 nop();
aoqi@0 1050 nop();
aoqi@0 1051 nop();
aoqi@0 1052 bl64_patchable(fd->entry(), rt);
aoqi@0 1053 _last_calls_return_pc = pc();
aoqi@0 1054 return _last_calls_return_pc;
aoqi@0 1055 }
aoqi@0 1056 } else {
aoqi@0 1057 // This call does not need to be relocatable, do more aggressive
aoqi@0 1058 // optimizations.
aoqi@0 1059 if (!ReoptimizeCallSequences
aoqi@0 1060 || !fd->is_friend_function()) {
aoqi@0 1061 // It's not a friend function as defined by class FunctionDescriptor,
aoqi@0 1062 // so do a full call-c here.
aoqi@0 1063 load_const(R11, (address)fd, R0);
aoqi@0 1064 return branch_to(R11, /*and_link=*/true,
aoqi@0 1065 /*save toc=*/false,
aoqi@0 1066 /*restore toc=*/false,
aoqi@0 1067 /*load toc=*/true,
aoqi@0 1068 /*load env=*/true);
aoqi@0 1069 } else {
aoqi@0 1070 // it's a friend function, load the entry point and don't care about
aoqi@0 1071 // toc and env.
aoqi@0 1072 address dest = fd->entry();
aoqi@0 1073 if (is_within_range_of_b(dest, pc())) {
aoqi@0 1074 bl(dest);
aoqi@0 1075 } else {
aoqi@0 1076 bl64_patchable(dest, rt);
aoqi@0 1077 }
aoqi@0 1078 _last_calls_return_pc = pc();
aoqi@0 1079 return _last_calls_return_pc;
aoqi@0 1080 }
aoqi@0 1081 }
aoqi@0 1082 }
aoqi@0 1083
aoqi@0 1084 // Call a C function. All constants needed reside in TOC.
aoqi@0 1085 //
aoqi@0 1086 // Read the address to call from the TOC.
aoqi@0 1087 // Read env from TOC, if fd specifies an env.
aoqi@0 1088 // Read new TOC from TOC.
aoqi@0 1089 address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
aoqi@0 1090 relocInfo::relocType rt, Register toc) {
aoqi@0 1091 if (!ReoptimizeCallSequences
aoqi@0 1092 || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
aoqi@0 1093 || !fd->is_friend_function()) {
aoqi@0 1094 // It's not a friend function as defined by class FunctionDescriptor,
aoqi@0 1095 // so do a full call-c here.
aoqi@0 1096 assert(fd->entry() != NULL, "function must be linked");
aoqi@0 1097
aoqi@0 1098 AddressLiteral fd_entry(fd->entry());
aoqi@0 1099 load_const_from_method_toc(R11, fd_entry, toc);
aoqi@0 1100 mtctr(R11);
aoqi@0 1101 if (fd->env() == NULL) {
aoqi@0 1102 li(R11, 0);
aoqi@0 1103 nop();
aoqi@0 1104 } else {
aoqi@0 1105 AddressLiteral fd_env(fd->env());
aoqi@0 1106 load_const_from_method_toc(R11, fd_env, toc);
aoqi@0 1107 }
aoqi@0 1108 AddressLiteral fd_toc(fd->toc());
aoqi@0 1109 load_toc_from_toc(R2_TOC, fd_toc, toc);
aoqi@0 1110 // R2_TOC is killed.
aoqi@0 1111 bctrl();
aoqi@0 1112 _last_calls_return_pc = pc();
aoqi@0 1113 } else {
aoqi@0 1114 // It's a friend function, load the entry point and don't care about
aoqi@0 1115 // toc and env. Use an optimizable call instruction, but ensure the
aoqi@0 1116 // same code-size as in the case of a non-friend function.
aoqi@0 1117 nop();
aoqi@0 1118 bl64_patchable(fd->entry(), rt);
aoqi@0 1119 _last_calls_return_pc = pc();
aoqi@0 1120 }
aoqi@0 1121 return _last_calls_return_pc;
aoqi@0 1122 }
aoqi@0 1123 #endif // ABI_ELFv2
aoqi@0 1124
aoqi@0 1125 void MacroAssembler::call_VM_base(Register oop_result,
aoqi@0 1126 Register last_java_sp,
aoqi@0 1127 address entry_point,
aoqi@0 1128 bool check_exceptions) {
aoqi@0 1129 BLOCK_COMMENT("call_VM {");
aoqi@0 1130 // Determine last_java_sp register.
aoqi@0 1131 if (!last_java_sp->is_valid()) {
aoqi@0 1132 last_java_sp = R1_SP;
aoqi@0 1133 }
aoqi@0 1134 set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);
aoqi@0 1135
aoqi@0 1136 // ARG1 must hold thread address.
aoqi@0 1137 mr(R3_ARG1, R16_thread);
aoqi@0 1138 #if defined(ABI_ELFv2)
aoqi@0 1139 address return_pc = call_c(entry_point, relocInfo::none);
aoqi@0 1140 #else
aoqi@0 1141 address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
aoqi@0 1142 #endif
aoqi@0 1143
aoqi@0 1144 reset_last_Java_frame();
aoqi@0 1145
aoqi@0 1146 // Check for pending exceptions.
aoqi@0 1147 if (check_exceptions) {
aoqi@0 1148 // We don't check for exceptions here.
aoqi@0 1149 ShouldNotReachHere();
aoqi@0 1150 }
aoqi@0 1151
aoqi@0 1152 // Get oop result if there is one and reset the value in the thread.
aoqi@0 1153 if (oop_result->is_valid()) {
aoqi@0 1154 get_vm_result(oop_result);
aoqi@0 1155 }
aoqi@0 1156
aoqi@0 1157 _last_calls_return_pc = return_pc;
aoqi@0 1158 BLOCK_COMMENT("} call_VM");
aoqi@0 1159 }
aoqi@0 1160
aoqi@0 1161 void MacroAssembler::call_VM_leaf_base(address entry_point) {
aoqi@0 1162 BLOCK_COMMENT("call_VM_leaf {");
aoqi@0 1163 #if defined(ABI_ELFv2)
aoqi@0 1164 call_c(entry_point, relocInfo::none);
aoqi@0 1165 #else
aoqi@0 1166 call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
aoqi@0 1167 #endif
aoqi@0 1168 BLOCK_COMMENT("} call_VM_leaf");
aoqi@0 1169 }
aoqi@0 1170
aoqi@0 1171 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
aoqi@0 1172 call_VM_base(oop_result, noreg, entry_point, check_exceptions);
aoqi@0 1173 }
aoqi@0 1174
aoqi@0 1175 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
aoqi@0 1176 bool check_exceptions) {
aoqi@0 1177 // R3_ARG1 is reserved for the thread.
aoqi@0 1178 mr_if_needed(R4_ARG2, arg_1);
aoqi@0 1179 call_VM(oop_result, entry_point, check_exceptions);
aoqi@0 1180 }
aoqi@0 1181
aoqi@0 1182 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
aoqi@0 1183 bool check_exceptions) {
aoqi@0 1184 // R3_ARG1 is reserved for the thread
aoqi@0 1185 mr_if_needed(R4_ARG2, arg_1);
aoqi@0 1186 assert(arg_2 != R4_ARG2, "smashed argument");
aoqi@0 1187 mr_if_needed(R5_ARG3, arg_2);
aoqi@0 1188 call_VM(oop_result, entry_point, check_exceptions);
aoqi@0 1189 }
aoqi@0 1190
goetz@7424 1191 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
goetz@7424 1192 bool check_exceptions) {
goetz@7424 1193 // R3_ARG1 is reserved for the thread
goetz@7424 1194 mr_if_needed(R4_ARG2, arg_1);
goetz@7424 1195 assert(arg_2 != R4_ARG2, "smashed argument");
goetz@7424 1196 mr_if_needed(R5_ARG3, arg_2);
goetz@7424 1197 mr_if_needed(R6_ARG4, arg_3);
goetz@7424 1198 call_VM(oop_result, entry_point, check_exceptions);
goetz@7424 1199 }
goetz@7424 1200
aoqi@0 1201 void MacroAssembler::call_VM_leaf(address entry_point) {
aoqi@0 1202 call_VM_leaf_base(entry_point);
aoqi@0 1203 }
aoqi@0 1204
aoqi@0 1205 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
aoqi@0 1206 mr_if_needed(R3_ARG1, arg_1);
aoqi@0 1207 call_VM_leaf(entry_point);
aoqi@0 1208 }
aoqi@0 1209
aoqi@0 1210 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
aoqi@0 1211 mr_if_needed(R3_ARG1, arg_1);
aoqi@0 1212 assert(arg_2 != R3_ARG1, "smashed argument");
aoqi@0 1213 mr_if_needed(R4_ARG2, arg_2);
aoqi@0 1214 call_VM_leaf(entry_point);
aoqi@0 1215 }
aoqi@0 1216
aoqi@0 1217 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
aoqi@0 1218 mr_if_needed(R3_ARG1, arg_1);
aoqi@0 1219 assert(arg_2 != R3_ARG1, "smashed argument");
aoqi@0 1220 mr_if_needed(R4_ARG2, arg_2);
aoqi@0 1221 assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
aoqi@0 1222 mr_if_needed(R5_ARG3, arg_3);
aoqi@0 1223 call_VM_leaf(entry_point);
aoqi@0 1224 }
aoqi@0 1225
aoqi@0 1226 // Check whether instruction is a read access to the polling page
aoqi@0 1227 // which was emitted by load_from_polling_page(..).
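// The accepted shape is, roughly, "ld R0, 0(ra)" with ra != R0 (i.e. ds == 0, rt == 0).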
aoqi@0 1228 bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
aoqi@0 1229 address* polling_address_ptr) {
aoqi@0 1230 if (!is_ld(instruction))
aoqi@0 1231 return false; // It's not a ld. Fail.
aoqi@0 1232
aoqi@0 1233 int rt = inv_rt_field(instruction);
aoqi@0 1234 int ra = inv_ra_field(instruction);
aoqi@0 1235 int ds = inv_ds_field(instruction);
aoqi@0 1236 if (!(ds == 0 && ra != 0 && rt == 0)) {
aoqi@0 1237 return false; // It's not a ld(r0, X, ra). Fail.
aoqi@0 1238 }
aoqi@0 1239
aoqi@0 1240 if (!ucontext) {
aoqi@0 1241 // Set polling address.
aoqi@0 1242 if (polling_address_ptr != NULL) {
aoqi@0 1243 *polling_address_ptr = NULL;
aoqi@0 1244 }
aoqi@0 1245 return true; // No ucontext given. Can't check value of ra. Assume true.
aoqi@0 1246 }
aoqi@0 1247
aoqi@0 1248 #ifdef LINUX
aoqi@0 1249 // Ucontext given. Check that register ra contains the address of
aoqi@0 1250 // the safepoint polling page.
aoqi@0 1251 ucontext_t* uc = (ucontext_t*) ucontext;
aoqi@0 1252 // Set polling address.
aoqi@0 1253 address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
aoqi@0 1254 if (polling_address_ptr != NULL) {
aoqi@0 1255 *polling_address_ptr = addr;
aoqi@0 1256 }
aoqi@0 1257 return os::is_poll_address(addr);
aoqi@0 1258 #else
aoqi@0 1259 // Not on Linux, ucontext must be NULL.
aoqi@0 1260 ShouldNotReachHere();
aoqi@0 1261 return false;
aoqi@0 1262 #endif
aoqi@0 1263 }
aoqi@0 1264
aoqi@0 1265 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
aoqi@0 1266 #ifdef LINUX
aoqi@0 1267 ucontext_t* uc = (ucontext_t*) ucontext;
aoqi@0 1268
aoqi@0 1269 if (is_stwx(instruction) || is_stwux(instruction)) {
aoqi@0 1270 int ra = inv_ra_field(instruction);
aoqi@0 1271 int rb = inv_rb_field(instruction);
aoqi@0 1272
aoqi@0 1273 // look up content of ra and rb in ucontext
aoqi@0 1274 address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
aoqi@0 1275 long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
aoqi@0 1276 return os::is_memory_serialize_page(thread, ra_val+rb_val);
aoqi@0 1277 } else if (is_stw(instruction) || is_stwu(instruction)) {
aoqi@0 1278 int ra = inv_ra_field(instruction);
aoqi@0 1279 int d1 = inv_d1_field(instruction);
aoqi@0 1280
aoqi@0 1281 // look up content of ra in ucontext
aoqi@0 1282 address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
aoqi@0 1283 return os::is_memory_serialize_page(thread, ra_val+d1);
aoqi@0 1284 } else {
aoqi@0 1285 return false;
aoqi@0 1286 }
aoqi@0 1287 #else
aoqi@0 1288 // workaround not needed on !LINUX :-)
aoqi@0 1289 ShouldNotCallThis();
aoqi@0 1290 return false;
aoqi@0 1291 #endif
aoqi@0 1292 }
aoqi@0 1293
aoqi@0 1294 void MacroAssembler::bang_stack_with_offset(int offset) {
aoqi@0 1295 // When increasing the stack, the old stack pointer will be written
aoqi@0 1296 // to the new top of stack according to the PPC64 abi.
aoqi@0 1297 // Therefore, stack banging is not necessary when increasing
aoqi@0 1298 // the stack by <= os::vm_page_size() bytes.
aoqi@0 1299 // When increasing the stack by a larger amount, this method is
aoqi@0 1300 // called repeatedly to bang the intermediate pages.
aoqi@0 1301
aoqi@0 1302 // Stack grows down, caller passes positive offset.
aoqi@0 1303 assert(offset > 0, "must bang with positive offset");
aoqi@0 1304
aoqi@0 1305 long stdoffset = -offset;
aoqi@0 1306
aoqi@0 1307 if (is_simm(stdoffset, 16)) {
aoqi@0 1308 // Signed 16 bit offset, a simple std is ok.
aoqi@0 1309 if (UseLoadInstructionsForStackBangingPPC64) {
aoqi@0 1310 ld(R0, (int)(signed short)stdoffset, R1_SP);
aoqi@0 1311 } else {
aoqi@0 1312 std(R0,(int)(signed short)stdoffset, R1_SP);
aoqi@0 1313 }
aoqi@0 1314 } else if (is_simm(stdoffset, 31)) {
aoqi@0 1315 const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
aoqi@0 1316 const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
aoqi@0 1317
aoqi@0 1318 Register tmp = R11;
aoqi@0 1319 addis(tmp, R1_SP, hi);
aoqi@0 1320 if (UseLoadInstructionsForStackBangingPPC64) {
aoqi@0 1321 ld(R0, lo, tmp);
aoqi@0 1322 } else {
aoqi@0 1323 std(R0, lo, tmp);
aoqi@0 1324 }
aoqi@0 1325 } else {
aoqi@0 1326 ShouldNotReachHere();
aoqi@0 1327 }
aoqi@0 1328 }
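// A minimal sketch of how a caller might bang a frame larger than one page,
// one page at a time (an illustration only, not the actual caller in this file;
// frame_size_in_bytes is a hypothetical value):
//
//   for (int off = os::vm_page_size(); off <= frame_size_in_bytes;
//        off += os::vm_page_size()) {
//     bang_stack_with_offset(off);   // touches SP - off
//   }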
aoqi@0 1329
aoqi@0 1330 // If instruction is a stack bang of the form
aoqi@0 1331 // std R0, x(Ry), (see bang_stack_with_offset())
aoqi@0 1332 // stdu R1_SP, x(R1_SP), (see push_frame(), resize_frame())
aoqi@0 1333 // or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame())
aoqi@0 1334 // return the banged address. Otherwise, return 0.
aoqi@0 1335 address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
aoqi@0 1336 #ifdef LINUX
aoqi@0 1337 ucontext_t* uc = (ucontext_t*) ucontext;
aoqi@0 1338 int rs = inv_rs_field(instruction);
aoqi@0 1339 int ra = inv_ra_field(instruction);
aoqi@0 1340 if ( (is_ld(instruction) && rs == 0 && UseLoadInstructionsForStackBangingPPC64)
aoqi@0 1341 || (is_std(instruction) && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
aoqi@0 1342 || (is_stdu(instruction) && rs == 1)) {
aoqi@0 1343 int ds = inv_ds_field(instruction);
aoqi@0 1344 // return banged address
aoqi@0 1345 return ds+(address)uc->uc_mcontext.regs->gpr[ra];
aoqi@0 1346 } else if (is_stdux(instruction) && rs == 1) {
aoqi@0 1347 int rb = inv_rb_field(instruction);
aoqi@0 1348 address sp = (address)uc->uc_mcontext.regs->gpr[1];
aoqi@0 1349 long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
aoqi@0 1350 return ra != 1 || rb_val >= 0 ? NULL // not a stack bang
aoqi@0 1351 : sp + rb_val; // banged address
aoqi@0 1352 }
aoqi@0 1353 return NULL; // not a stack bang
aoqi@0 1354 #else
aoqi@0 1355 // workaround not needed on !LINUX :-)
aoqi@0 1356 ShouldNotCallThis();
aoqi@0 1357 return NULL;
aoqi@0 1358 #endif
aoqi@0 1359 }
aoqi@0 1360
aoqi@0 1361 // CmpxchgX sets condition register to cmpX(current, compare).
aoqi@0 1362 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
aoqi@0 1363 Register compare_value, Register exchange_value,
aoqi@0 1364 Register addr_base, int semantics, bool cmpxchgx_hint,
aoqi@0 1365 Register int_flag_success, bool contention_hint) {
aoqi@0 1366 Label retry;
aoqi@0 1367 Label failed;
aoqi@0 1368 Label done;
aoqi@0 1369
aoqi@0 1370 // Save one branch if result is returned via register and
aoqi@0 1371 // result register is different from the other ones.
aoqi@0 1372 bool use_result_reg = (int_flag_success != noreg);
aoqi@0 1373 bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
aoqi@0 1374 int_flag_success != exchange_value && int_flag_success != addr_base);
aoqi@0 1375
aoqi@0 1376 // release/fence semantics
aoqi@0 1377 if (semantics & MemBarRel) {
aoqi@0 1378 release();
aoqi@0 1379 }
aoqi@0 1380
aoqi@0 1381 if (use_result_reg && preset_result_reg) {
aoqi@0 1382 li(int_flag_success, 0); // preset (assume cas failed)
aoqi@0 1383 }
aoqi@0 1384
aoqi@0 1385 // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
aoqi@0 1386 if (contention_hint) { // Don't try to reserve if cmp fails.
aoqi@0 1387 lwz(dest_current_value, 0, addr_base);
aoqi@0 1388 cmpw(flag, dest_current_value, compare_value);
aoqi@0 1389 bne(flag, failed);
aoqi@0 1390 }
aoqi@0 1391
aoqi@0 1392 // atomic emulation loop
aoqi@0 1393 bind(retry);
aoqi@0 1394
aoqi@0 1395 lwarx(dest_current_value, addr_base, cmpxchgx_hint);
aoqi@0 1396 cmpw(flag, dest_current_value, compare_value);
aoqi@0 1397 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
aoqi@0 1398 bne_predict_not_taken(flag, failed);
aoqi@0 1399 } else {
aoqi@0 1400 bne( flag, failed);
aoqi@0 1401 }
aoqi@0 1402 // branch to failed => (flag == ne), (dest_current_value != compare_value)
aoqi@0 1403 // fall through => (flag == eq), (dest_current_value == compare_value)
aoqi@0 1404
aoqi@0 1405 stwcx_(exchange_value, addr_base);
aoqi@0 1406 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
aoqi@0 1407 bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
aoqi@0 1408 } else {
aoqi@0 1409 bne( CCR0, retry); // StXcx_ sets CCR0.
aoqi@0 1410 }
aoqi@0 1411 // fall through => (flag == eq), (dest_current_value == compare_value), (swapped)
aoqi@0 1412
aoqi@0 1413 // Result in register (must do this at the end because int_flag_success can be the
aoqi@0 1414 // same register as one above).
aoqi@0 1415 if (use_result_reg) {
aoqi@0 1416 li(int_flag_success, 1);
aoqi@0 1417 }
aoqi@0 1418
aoqi@0 1419 if (semantics & MemBarFenceAfter) {
aoqi@0 1420 fence();
aoqi@0 1421 } else if (semantics & MemBarAcq) {
aoqi@0 1422 isync();
aoqi@0 1423 }
aoqi@0 1424
aoqi@0 1425 if (use_result_reg && !preset_result_reg) {
aoqi@0 1426 b(done);
aoqi@0 1427 }
aoqi@0 1428
aoqi@0 1429 bind(failed);
aoqi@0 1430 if (use_result_reg && !preset_result_reg) {
aoqi@0 1431 li(int_flag_success, 0);
aoqi@0 1432 }
aoqi@0 1433
aoqi@0 1434 bind(done);
aoqi@0 1435 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
aoqi@0 1436 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
aoqi@0 1437 }
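// Pseudo-code of the word-sized CAS above (sketch; the memory-order flags and
// the contention hint are omitted):
//
//   dest_current_value = *(int*)addr_base;
//   if (dest_current_value == compare_value) {
//     *(int*)addr_base = exchange_value;                    // flag == eq, swapped
//     if (int_flag_success != noreg) int_flag_success = 1;
//   } else {                                                // flag == ne, not swapped
//     if (int_flag_success != noreg) int_flag_success = 0;
//   }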
aoqi@0 1438
aoqi@0 1439 // Performs an atomic compare-and-exchange:
aoqi@0 1440 // if (compare_value == *addr_base)
aoqi@0 1441 // *addr_base = exchange_value
aoqi@0 1442 // int_flag_success = 1;
aoqi@0 1443 // else
aoqi@0 1444 // int_flag_success = 0;
aoqi@0 1445 //
aoqi@0 1446 // ConditionRegister flag = cmp(compare_value, *addr_base)
aoqi@0 1447 // Register dest_current_value = *addr_base
aoqi@0 1448 // Register compare_value Used to compare with value in memory
aoqi@0 1449 // Register exchange_value Written to memory if compare_value == *addr_base
aoqi@0 1450 // Register addr_base The memory location to compareXChange
aoqi@0 1451 // Register int_flag_success Set to 1 if exchange_value was written to *addr_base
aoqi@0 1452 //
aoqi@0 1453 // To avoid the costly compare-and-exchange, the value is tested beforehand.
aoqi@0 1454 // Several special cases exist to avoid generating unnecessary instructions.
aoqi@0 1455 //
aoqi@0 1456 void MacroAssembler::cmpxchgd(ConditionRegister flag,
aoqi@0 1457 Register dest_current_value, Register compare_value, Register exchange_value,
aoqi@0 1458 Register addr_base, int semantics, bool cmpxchgx_hint,
aoqi@0 1459 Register int_flag_success, Label* failed_ext, bool contention_hint) {
aoqi@0 1460 Label retry;
aoqi@0 1461 Label failed_int;
aoqi@0 1462 Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
aoqi@0 1463 Label done;
aoqi@0 1464
aoqi@0 1465 // Save one branch if result is returned via register and result register is different from the other ones.
aoqi@0 1466 bool use_result_reg = (int_flag_success != noreg);
aoqi@0 1467 bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
aoqi@0 1468 int_flag_success != exchange_value && int_flag_success != addr_base);
aoqi@0 1469 assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
aoqi@0 1470
aoqi@0 1471 // release/fence semantics
aoqi@0 1472 if (semantics & MemBarRel) {
aoqi@0 1473 release();
aoqi@0 1474 }
aoqi@0 1475
aoqi@0 1476 if (use_result_reg && preset_result_reg) {
aoqi@0 1477 li(int_flag_success, 0); // preset (assume cas failed)
aoqi@0 1478 }
aoqi@0 1479
aoqi@0 1480 // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
aoqi@0 1481 if (contention_hint) { // Don't try to reserve if cmp fails.
aoqi@0 1482 ld(dest_current_value, 0, addr_base);
aoqi@0 1483 cmpd(flag, dest_current_value, compare_value);
aoqi@0 1484 bne(flag, failed);
aoqi@0 1485 }
aoqi@0 1486
aoqi@0 1487 // atomic emulation loop
aoqi@0 1488 bind(retry);
aoqi@0 1489
aoqi@0 1490 ldarx(dest_current_value, addr_base, cmpxchgx_hint);
aoqi@0 1491 cmpd(flag, dest_current_value, compare_value);
aoqi@0 1492 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
aoqi@0 1493 bne_predict_not_taken(flag, failed);
aoqi@0 1494 } else {
aoqi@0 1495 bne( flag, failed);
aoqi@0 1496 }
aoqi@0 1497
aoqi@0 1498 stdcx_(exchange_value, addr_base);
aoqi@0 1499 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
aoqi@0 1500 bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
aoqi@0 1501 } else {
aoqi@0 1502 bne( CCR0, retry); // stXcx_ sets CCR0
aoqi@0 1503 }
aoqi@0 1504
aoqi@0 1505 // result in register (must do this at the end because int_flag_success can be the same register as one above)
aoqi@0 1506 if (use_result_reg) {
aoqi@0 1507 li(int_flag_success, 1);
aoqi@0 1508 }
aoqi@0 1509
aoqi@0 1510 // POWER6 doesn't need isync in CAS.
aoqi@0 1511 // Always emit isync to be on the safe side.
aoqi@0 1512 if (semantics & MemBarFenceAfter) {
aoqi@0 1513 fence();
aoqi@0 1514 } else if (semantics & MemBarAcq) {
aoqi@0 1515 isync();
aoqi@0 1516 }
aoqi@0 1517
aoqi@0 1518 if (use_result_reg && !preset_result_reg) {
aoqi@0 1519 b(done);
aoqi@0 1520 }
aoqi@0 1521
aoqi@0 1522 bind(failed_int);
aoqi@0 1523 if (use_result_reg && !preset_result_reg) {
aoqi@0 1524 li(int_flag_success, 0);
aoqi@0 1525 }
aoqi@0 1526
aoqi@0 1527 bind(done);
aoqi@0 1528 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
aoqi@0 1529 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
aoqi@0 1530 }
aoqi@0 1531
aoqi@0 1532 // Look up the method for a megamorphic invokeinterface call.
aoqi@0 1533 // The target method is determined by <intf_klass, itable_index>.
aoqi@0 1534 // The receiver klass is in recv_klass.
aoqi@0 1535 // On success, the result will be in method_result, and execution falls through.
aoqi@0 1536 // On failure, execution transfers to the given label.
aoqi@0 1537 void MacroAssembler::lookup_interface_method(Register recv_klass,
aoqi@0 1538 Register intf_klass,
aoqi@0 1539 RegisterOrConstant itable_index,
aoqi@0 1540 Register method_result,
aoqi@0 1541 Register scan_temp,
aoqi@0 1542 Register sethi_temp,
aoqi@0 1543 Label& L_no_such_interface) {
aoqi@0 1544 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
aoqi@0 1545 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
aoqi@0 1546 "caller must use same register for non-constant itable index as for method");
aoqi@0 1547
aoqi@0 1548 // Compute start of first itableOffsetEntry (which is at the end of the vtable).
aoqi@0 1549 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
aoqi@0 1550 int itentry_off = itableMethodEntry::method_offset_in_bytes();
aoqi@0 1551 int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
aoqi@0 1552 int scan_step = itableOffsetEntry::size() * wordSize;
aoqi@0 1553 int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
aoqi@0 1554
aoqi@0 1555 lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
aoqi@0 1556 // %%% We should store the aligned, prescaled offset in the klassOop.
aoqi@0 1557 // Then the next several instructions would fold away.
aoqi@0 1558
aoqi@0 1559 sldi(scan_temp, scan_temp, log_vte_size);
aoqi@0 1560 addi(scan_temp, scan_temp, vtable_base);
aoqi@0 1561 add(scan_temp, recv_klass, scan_temp);
aoqi@0 1562
aoqi@0 1563 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
aoqi@0 1564 if (itable_index.is_register()) {
aoqi@0 1565 Register itable_offset = itable_index.as_register();
aoqi@0 1566 sldi(itable_offset, itable_offset, logMEsize);
aoqi@0 1567 if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
aoqi@0 1568 add(recv_klass, itable_offset, recv_klass);
aoqi@0 1569 } else {
aoqi@0 1570 long itable_offset = (long)itable_index.as_constant();
aoqi@0 1571 load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
aoqi@0 1572 add(recv_klass, sethi_temp, recv_klass);
aoqi@0 1573 }
aoqi@0 1574
aoqi@0 1575 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
aoqi@0 1576 // if (scan->interface() == intf) {
aoqi@0 1577 // result = (klass + scan->offset() + itable_index);
aoqi@0 1578 // }
aoqi@0 1579 // }
aoqi@0 1580 Label search, found_method;
aoqi@0 1581
aoqi@0 1582 for (int peel = 1; peel >= 0; peel--) {
aoqi@0 1583 // %%%% Could load both offset and interface in one ldx, if they were
aoqi@0 1584 // in the opposite order. This would save a load.
aoqi@0 1585 ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
aoqi@0 1586
aoqi@0 1587 // Check that this entry is non-null. A null entry means that
aoqi@0 1588 // the receiver class doesn't implement the interface, and wasn't the
aoqi@0 1589 // same as when the caller was compiled.
aoqi@0 1590 cmpd(CCR0, method_result, intf_klass);
aoqi@0 1591
aoqi@0 1592 if (peel) {
aoqi@0 1593 beq(CCR0, found_method);
aoqi@0 1594 } else {
aoqi@0 1595 bne(CCR0, search);
aoqi@0 1596 // (invert the test to fall through to found_method...)
aoqi@0 1597 }
aoqi@0 1598
aoqi@0 1599 if (!peel) break;
aoqi@0 1600
aoqi@0 1601 bind(search);
aoqi@0 1602
aoqi@0 1603 cmpdi(CCR0, method_result, 0);
aoqi@0 1604 beq(CCR0, L_no_such_interface);
aoqi@0 1605 addi(scan_temp, scan_temp, scan_step);
aoqi@0 1606 }
aoqi@0 1607
aoqi@0 1608 bind(found_method);
aoqi@0 1609
aoqi@0 1610 // Got a hit.
aoqi@0 1611 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
aoqi@0 1612 lwz(scan_temp, ito_offset, scan_temp);
aoqi@0 1613 ldx(method_result, scan_temp, recv_klass);
aoqi@0 1614 }
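// Net effect on a hit (descriptive sketch of the lookup above):
//
//   itableOffsetEntry* ioe = /* first entry whose interface() == intf_klass */;
//   method_result = *(Method**)((address)recv_klass + ioe->offset()
//                     + itable_index * itableMethodEntry::size() * wordSize
//                     + itableMethodEntry::method_offset_in_bytes());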
aoqi@0 1615
aoqi@0 1616 // virtual method calling
aoqi@0 1617 void MacroAssembler::lookup_virtual_method(Register recv_klass,
aoqi@0 1618 RegisterOrConstant vtable_index,
aoqi@0 1619 Register method_result) {
aoqi@0 1620
aoqi@0 1621 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
aoqi@0 1622
aoqi@0 1623 const int base = InstanceKlass::vtable_start_offset() * wordSize;
aoqi@0 1624 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
aoqi@0 1625
aoqi@0 1626 if (vtable_index.is_register()) {
aoqi@0 1627 sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
aoqi@0 1628 add(recv_klass, vtable_index.as_register(), recv_klass);
aoqi@0 1629 } else {
aoqi@0 1630 addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
aoqi@0 1631 }
aoqi@0 1632 ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
aoqi@0 1633 }
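// Net effect (descriptive sketch of the load above):
//
//   R19_method = *(Method**)((address)recv_klass
//                  + InstanceKlass::vtable_start_offset() * wordSize
//                  + vtable_index * wordSize
//                  + vtableEntry::method_offset_in_bytes());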
aoqi@0 1634
aoqi@0 1635 /////////////////////////////////////////// subtype checking ////////////////////////////////////////////
aoqi@0 1636
aoqi@0 1637 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
aoqi@0 1638 Register super_klass,
aoqi@0 1639 Register temp1_reg,
aoqi@0 1640 Register temp2_reg,
aoqi@0 1641 Label& L_success,
aoqi@0 1642 Label& L_failure) {
aoqi@0 1643
aoqi@0 1644 const Register check_cache_offset = temp1_reg;
aoqi@0 1645 const Register cached_super = temp2_reg;
aoqi@0 1646
aoqi@0 1647 assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
aoqi@0 1648
aoqi@0 1649 int sco_offset = in_bytes(Klass::super_check_offset_offset());
aoqi@0 1650 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
aoqi@0 1651
aoqi@0 1652 // If the pointers are equal, we are done (e.g., String[] elements).
aoqi@0 1653 // This self-check enables sharing of secondary supertype arrays among
aoqi@0 1654 // non-primary types such as array-of-interface. Otherwise, each such
aoqi@0 1655 // type would need its own customized SSA.
aoqi@0 1656 // We move this check to the front of the fast path because many
aoqi@0 1657 // type checks are in fact trivially successful in this manner,
aoqi@0 1658 // so we get a nicely predicted branch right at the start of the check.
aoqi@0 1659 cmpd(CCR0, sub_klass, super_klass);
aoqi@0 1660 beq(CCR0, L_success);
aoqi@0 1661
aoqi@0 1662 // Check the supertype display:
aoqi@0 1663 lwz(check_cache_offset, sco_offset, super_klass);
aoqi@0 1664 // The loaded value is the offset from KlassOopDesc.
aoqi@0 1665
aoqi@0 1666 ldx(cached_super, check_cache_offset, sub_klass);
aoqi@0 1667 cmpd(CCR0, cached_super, super_klass);
aoqi@0 1668 beq(CCR0, L_success);
aoqi@0 1669
aoqi@0 1670 // This check has worked decisively for primary supers.
aoqi@0 1671 // Secondary supers are sought in the super_cache ('super_cache_addr').
aoqi@0 1672 // (Secondary supers are interfaces and very deeply nested subtypes.)
aoqi@0 1673 // This works in the same check above because of a tricky aliasing
aoqi@0 1674 // between the super_cache and the primary super display elements.
aoqi@0 1675 // (The 'super_check_addr' can address either, as the case requires.)
aoqi@0 1676 // Note that the cache is updated below if it does not help us find
aoqi@0 1677 // what we need immediately.
aoqi@0 1678 // So if it was a primary super, we can just fail immediately.
aoqi@0 1679 // Otherwise, it's the slow path for us (no success at this point).
aoqi@0 1680
aoqi@0 1681 cmpwi(CCR0, check_cache_offset, sc_offset);
aoqi@0 1682 bne(CCR0, L_failure);
aoqi@0 1683 // bind(slow_path); // fallthru
aoqi@0 1684 }
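// Pseudo-code of the fast path above (sketch):
//
//   if (sub_klass == super_klass) goto L_success;
//   int sco = super_klass->super_check_offset();
//   if (*(Klass**)((address)sub_klass + sco) == super_klass) goto L_success;
//   if (sco != in_bytes(Klass::secondary_super_cache_offset())) goto L_failure;
//   // otherwise fall through to the slow path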
aoqi@0 1685
aoqi@0 1686 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
aoqi@0 1687 Register super_klass,
aoqi@0 1688 Register temp1_reg,
aoqi@0 1689 Register temp2_reg,
aoqi@0 1690 Label* L_success,
aoqi@0 1691 Register result_reg) {
aoqi@0 1692 const Register array_ptr = temp1_reg; // current value from cache array
aoqi@0 1693 const Register temp = temp2_reg;
aoqi@0 1694
aoqi@0 1695 assert_different_registers(sub_klass, super_klass, array_ptr, temp);
aoqi@0 1696
aoqi@0 1697 int source_offset = in_bytes(Klass::secondary_supers_offset());
aoqi@0 1698 int target_offset = in_bytes(Klass::secondary_super_cache_offset());
aoqi@0 1699
aoqi@0 1700 int length_offset = Array<Klass*>::length_offset_in_bytes();
aoqi@0 1701 int base_offset = Array<Klass*>::base_offset_in_bytes();
aoqi@0 1702
aoqi@0 1703 Label hit, loop, failure, fallthru;
aoqi@0 1704
aoqi@0 1705 ld(array_ptr, source_offset, sub_klass);
aoqi@0 1706
aoqi@0 1707 //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
aoqi@0 1708 lwz(temp, length_offset, array_ptr);
aoqi@0 1709 cmpwi(CCR0, temp, 0);
aoqi@0 1710 beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
aoqi@0 1711
aoqi@0 1712 mtctr(temp); // load ctr
aoqi@0 1713
aoqi@0 1714 bind(loop);
aoqi@0 1715 // Oops in the table are no longer compressed.
aoqi@0 1716 ld(temp, base_offset, array_ptr);
aoqi@0 1717 cmpd(CCR0, temp, super_klass);
aoqi@0 1718 beq(CCR0, hit);
aoqi@0 1719 addi(array_ptr, array_ptr, BytesPerWord);
aoqi@0 1720 bdnz(loop);
aoqi@0 1721
aoqi@0 1722 bind(failure);
aoqi@0 1723 if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
aoqi@0 1724 b(fallthru);
aoqi@0 1725
aoqi@0 1726 bind(hit);
aoqi@0 1727 std(super_klass, target_offset, sub_klass); // save result to cache
aoqi@0 1728 if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
aoqi@0 1729 if (L_success != NULL) b(*L_success);
aoqi@0 1730
aoqi@0 1731 bind(fallthru);
aoqi@0 1732 }
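// Pseudo-code of the linear scan above (sketch):
//
//   Array<Klass*>* ss = sub_klass->secondary_supers();
//   for (int i = 0; i < ss->length(); i++) {
//     if (ss->at(i) == super_klass) {
//       /* cache the hit in sub_klass->_secondary_super_cache */
//       if (result_reg != noreg) result = 0;    // 0 == hit
//       goto L_success (if provided);
//     }
//   }
//   if (result_reg != noreg) result = 1;        // 1 == miss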
aoqi@0 1733
aoqi@0 1734 // Try fast path, then go to slow one if not successful
aoqi@0 1735 void MacroAssembler::check_klass_subtype(Register sub_klass,
aoqi@0 1736 Register super_klass,
aoqi@0 1737 Register temp1_reg,
aoqi@0 1738 Register temp2_reg,
aoqi@0 1739 Label& L_success) {
aoqi@0 1740 Label L_failure;
aoqi@0 1741 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
aoqi@0 1742 check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
aoqi@0 1743 bind(L_failure); // Fallthru if not successful.
aoqi@0 1744 }
aoqi@0 1745
aoqi@0 1746 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
aoqi@0 1747 Register temp_reg,
aoqi@0 1748 Label& wrong_method_type) {
aoqi@0 1749 assert_different_registers(mtype_reg, mh_reg, temp_reg);
aoqi@0 1750 // Compare method type against that of the receiver.
aoqi@0 1751 load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
aoqi@0 1752 cmpd(CCR0, temp_reg, mtype_reg);
aoqi@0 1753 bne(CCR0, wrong_method_type);
aoqi@0 1754 }
aoqi@0 1755
aoqi@0 1756 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
aoqi@0 1757 Register temp_reg,
aoqi@0 1758 int extra_slot_offset) {
aoqi@0 1759 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
aoqi@0 1760 int stackElementSize = Interpreter::stackElementSize;
aoqi@0 1761 int offset = extra_slot_offset * stackElementSize;
aoqi@0 1762 if (arg_slot.is_constant()) {
aoqi@0 1763 offset += arg_slot.as_constant() * stackElementSize;
aoqi@0 1764 return offset;
aoqi@0 1765 } else {
aoqi@0 1766 assert(temp_reg != noreg, "must specify");
aoqi@0 1767 sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
aoqi@0 1768 if (offset != 0)
aoqi@0 1769 addi(temp_reg, temp_reg, offset);
aoqi@0 1770 return temp_reg;
aoqi@0 1771 }
aoqi@0 1772 }
aoqi@0 1773
aoqi@0 1774 void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
aoqi@0 1775 Register mark_reg, Register temp_reg,
aoqi@0 1776 Register temp2_reg, Label& done, Label* slow_case) {
aoqi@0 1777 assert(UseBiasedLocking, "why call this otherwise?");
aoqi@0 1778
aoqi@0 1779 #ifdef ASSERT
aoqi@0 1780 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
aoqi@0 1781 #endif
aoqi@0 1782
aoqi@0 1783 Label cas_label;
aoqi@0 1784
aoqi@0 1785 // Branch to done if fast path fails and no slow_case provided.
aoqi@0 1786 Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
aoqi@0 1787
aoqi@0 1788 // Biased locking
aoqi@0 1789 // See whether the lock is currently biased toward our thread and
aoqi@0 1790 // whether the epoch is still valid
aoqi@0 1791 // Note that the runtime guarantees sufficient alignment of JavaThread
aoqi@0 1792 // pointers to allow age to be placed into low bits
aoqi@0 1793 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
aoqi@0 1794 "biased locking makes assumptions about bit layout");
aoqi@0 1795
aoqi@0 1796 if (PrintBiasedLockingStatistics) {
aoqi@0 1797 load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
aoqi@0 1798 lwz(temp2_reg, 0, temp_reg);
aoqi@0 1799 addi(temp2_reg, temp2_reg, 1);
aoqi@0 1800 stw(temp2_reg, 0, temp_reg);
aoqi@0 1801 }
aoqi@0 1802
aoqi@0 1803 andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
aoqi@0 1804 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
aoqi@0 1805 bne(cr_reg, cas_label);
aoqi@0 1806
aoqi@0 1807 load_klass(temp_reg, obj_reg);
aoqi@0 1808
aoqi@0 1809 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
aoqi@0 1810 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
aoqi@0 1811 orr(temp_reg, R16_thread, temp_reg);
aoqi@0 1812 xorr(temp_reg, mark_reg, temp_reg);
aoqi@0 1813 andr(temp_reg, temp_reg, temp2_reg);
aoqi@0 1814 cmpdi(cr_reg, temp_reg, 0);
aoqi@0 1815 if (PrintBiasedLockingStatistics) {
aoqi@0 1816 Label l;
aoqi@0 1817 bne(cr_reg, l);
aoqi@0 1818 load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
aoqi@0 1819 lwz(temp2_reg, 0, mark_reg);
aoqi@0 1820 addi(temp2_reg, temp2_reg, 1);
aoqi@0 1821 stw(temp2_reg, 0, mark_reg);
aoqi@0 1822 // restore mark_reg
aoqi@0 1823 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
aoqi@0 1824 bind(l);
aoqi@0 1825 }
aoqi@0 1826 beq(cr_reg, done);
aoqi@0 1827
aoqi@0 1828 Label try_revoke_bias;
aoqi@0 1829 Label try_rebias;
aoqi@0 1830
aoqi@0 1831 // At this point we know that the header has the bias pattern and
aoqi@0 1832 // that we are not the bias owner in the current epoch. We need to
aoqi@0 1833 // figure out more details about the state of the header in order to
aoqi@0 1834 // know what operations can be legally performed on the object's
aoqi@0 1835 // header.
aoqi@0 1836
aoqi@0 1837 // If the low three bits in the xor result aren't clear, that means
aoqi@0 1838 // the prototype header is no longer biased and we have to revoke
aoqi@0 1839 // the bias on this object.
aoqi@0 1840 andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
aoqi@0 1841 cmpwi(cr_reg, temp2_reg, 0);
aoqi@0 1842 bne(cr_reg, try_revoke_bias);
aoqi@0 1843
aoqi@0 1844 // Biasing is still enabled for this data type. See whether the
aoqi@0 1845 // epoch of the current bias is still valid, meaning that the epoch
aoqi@0 1846 // bits of the mark word are equal to the epoch bits of the
aoqi@0 1847 // prototype header. (Note that the prototype header's epoch bits
aoqi@0 1848 // only change at a safepoint.) If not, attempt to rebias the object
aoqi@0 1849 // toward the current thread. Note that we must be absolutely sure
aoqi@0 1850 // that the current epoch is invalid in order to do this because
aoqi@0 1851 // otherwise the manipulations it performs on the mark word are
aoqi@0 1852 // illegal.
aoqi@0 1853
aoqi@0 1854 int shift_amount = 64 - markOopDesc::epoch_shift;
aoqi@0 1855 // rotate epoch bits to right (little) end and set other bits to 0
aoqi@0 1856 // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
aoqi@0 1857 rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
aoqi@0 1858 // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
aoqi@0 1859 bne(CCR0, try_rebias);
aoqi@0 1860
aoqi@0 1861 // The epoch of the current bias is still valid but we know nothing
aoqi@0 1862 // about the owner; it might be set or it might be clear. Try to
aoqi@0 1863 // acquire the bias of the object using an atomic operation. If this
aoqi@0 1864 // fails we will go into the runtime to revoke the object's bias.
aoqi@0 1865 // Note that we first construct the presumed unbiased header so we
aoqi@0 1866 // don't accidentally blow away another thread's valid bias.
aoqi@0 1867 andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
aoqi@0 1868 markOopDesc::age_mask_in_place |
aoqi@0 1869 markOopDesc::epoch_mask_in_place));
aoqi@0 1870 orr(temp_reg, R16_thread, mark_reg);
aoqi@0 1871
aoqi@0 1872 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
aoqi@0 1873
aoqi@0 1874 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
aoqi@0 1875 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
aoqi@0 1876 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
aoqi@0 1877 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
aoqi@0 1878 /*where=*/obj_reg,
aoqi@0 1879 MacroAssembler::MemBarAcq,
aoqi@0 1880 MacroAssembler::cmpxchgx_hint_acquire_lock(),
aoqi@0 1881 noreg, slow_case_int); // bail out if failed
aoqi@0 1882
aoqi@0 1883 // If the biasing toward our thread failed, this means that
aoqi@0 1884 // another thread succeeded in biasing it toward itself and we
aoqi@0 1885 // need to revoke that bias. The revocation will occur in the
aoqi@0 1886 // interpreter runtime in the slow case.
aoqi@0 1887 if (PrintBiasedLockingStatistics) {
aoqi@0 1888 load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
aoqi@0 1889 lwz(temp2_reg, 0, temp_reg);
aoqi@0 1890 addi(temp2_reg, temp2_reg, 1);
aoqi@0 1891 stw(temp2_reg, 0, temp_reg);
aoqi@0 1892 }
aoqi@0 1893 b(done);
aoqi@0 1894
aoqi@0 1895 bind(try_rebias);
aoqi@0 1896 // At this point we know the epoch has expired, meaning that the
aoqi@0 1897 // current "bias owner", if any, is actually invalid. Under these
aoqi@0 1898 // circumstances _only_, we are allowed to use the current header's
aoqi@0 1899 // value as the comparison value when doing the cas to acquire the
aoqi@0 1900 // bias in the current epoch. In other words, we allow transfer of
aoqi@0 1901 // the bias from one thread to another directly in this situation.
aoqi@0 1902 andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
aoqi@0 1903 orr(temp_reg, R16_thread, temp_reg);
aoqi@0 1904 load_klass(temp2_reg, obj_reg);
aoqi@0 1905 ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
aoqi@0 1906 orr(temp_reg, temp_reg, temp2_reg);
aoqi@0 1907
aoqi@0 1908 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
aoqi@0 1909
aoqi@0 1910 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
aoqi@0 1911 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
aoqi@0 1912 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
aoqi@0 1913 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
aoqi@0 1914 /*where=*/obj_reg,
aoqi@0 1915 MacroAssembler::MemBarAcq,
aoqi@0 1916 MacroAssembler::cmpxchgx_hint_acquire_lock(),
aoqi@0 1917 noreg, slow_case_int); // bail out if failed
aoqi@0 1918
aoqi@0 1919 // If the biasing toward our thread failed, this means that
aoqi@0 1920 // another thread succeeded in biasing it toward itself and we
aoqi@0 1921 // need to revoke that bias. The revocation will occur in the
aoqi@0 1922 // interpreter runtime in the slow case.
aoqi@0 1923 if (PrintBiasedLockingStatistics) {
aoqi@0 1924 load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
aoqi@0 1925 lwz(temp2_reg, 0, temp_reg);
aoqi@0 1926 addi(temp2_reg, temp2_reg, 1);
aoqi@0 1927 stw(temp2_reg, 0, temp_reg);
aoqi@0 1928 }
aoqi@0 1929 b(done);
aoqi@0 1930
aoqi@0 1931 bind(try_revoke_bias);
aoqi@0 1932 // The prototype mark in the klass doesn't have the bias bit set any
aoqi@0 1933 // more, indicating that objects of this data type are not supposed
aoqi@0 1934 // to be biased any more. We are going to try to reset the mark of
aoqi@0 1935 // this object to the prototype value and fall through to the
aoqi@0 1936 // CAS-based locking scheme. Note that if our CAS fails, it means
aoqi@0 1937 // that another thread raced us for the privilege of revoking the
aoqi@0 1938 // bias of this particular object, so it's okay to continue in the
aoqi@0 1939 // normal locking code.
aoqi@0 1940 load_klass(temp_reg, obj_reg);
aoqi@0 1941 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
aoqi@0 1942 andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
aoqi@0 1943 orr(temp_reg, temp_reg, temp2_reg);
aoqi@0 1944
aoqi@0 1945 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
aoqi@0 1946
aoqi@0 1947 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
aoqi@0 1948 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
aoqi@0 1949 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
aoqi@0 1950 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
aoqi@0 1951 /*where=*/obj_reg,
aoqi@0 1952 MacroAssembler::MemBarAcq,
aoqi@0 1953 MacroAssembler::cmpxchgx_hint_acquire_lock());
aoqi@0 1954
aoqi@0 1955 // reload markOop in mark_reg before continuing with lightweight locking
aoqi@0 1956 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
aoqi@0 1957
aoqi@0 1958 // Fall through to the normal CAS-based lock, because no matter what
aoqi@0 1959 // the result of the above CAS, some thread must have succeeded in
aoqi@0 1960 // removing the bias bit from the object's header.
aoqi@0 1961 if (PrintBiasedLockingStatistics) {
aoqi@0 1962 Label l;
aoqi@0 1963 bne(cr_reg, l);
aoqi@0 1964 load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
aoqi@0 1965 lwz(temp2_reg, 0, temp_reg);
aoqi@0 1966 addi(temp2_reg, temp2_reg, 1);
aoqi@0 1967 stw(temp2_reg, 0, temp_reg);
aoqi@0 1968 bind(l);
aoqi@0 1969 }
aoqi@0 1970
aoqi@0 1971 bind(cas_label);
aoqi@0 1972 }
aoqi@0 1973
aoqi@0 1974 void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
aoqi@0 1975 // Check for biased locking unlock case, which is a no-op
aoqi@0 1976 // Note: we do not have to check the thread ID for two reasons.
aoqi@0 1977 // First, the interpreter checks for IllegalMonitorStateException at
aoqi@0 1978 // a higher level. Second, if the bias was revoked while we held the
aoqi@0 1979 // lock, the object could not be rebiased toward another thread, so
aoqi@0 1980 // the bias bit would be clear.
aoqi@0 1981
aoqi@0 1982 ld(temp_reg, 0, mark_addr);
aoqi@0 1983 andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
aoqi@0 1984
aoqi@0 1985 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
aoqi@0 1986 beq(cr_reg, done);
aoqi@0 1987 }
aoqi@0 1988
aoqi@0 1989 // "The box" is the space on the stack where we copy the object mark.
aoqi@0 1990 void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
aoqi@0 1991 Register temp, Register displaced_header, Register current_header) {
aoqi@0 1992 assert_different_registers(oop, box, temp, displaced_header, current_header);
aoqi@0 1993 assert(flag != CCR0, "bad condition register");
aoqi@0 1994 Label cont;
aoqi@0 1995 Label object_has_monitor;
aoqi@0 1996 Label cas_failed;
aoqi@0 1997
aoqi@0 1998 // Load markOop from object into displaced_header.
aoqi@0 1999 ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
aoqi@0 2000
aoqi@0 2001
aoqi@0 2002 // Always do locking in runtime.
aoqi@0 2003 if (EmitSync & 0x01) {
aoqi@0 2004 cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
aoqi@0 2005 return;
aoqi@0 2006 }
aoqi@0 2007
aoqi@0 2008 if (UseBiasedLocking) {
aoqi@0 2009 biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
aoqi@0 2010 }
aoqi@0 2011
aoqi@0 2012 // Handle existing monitor.
aoqi@0 2013 if ((EmitSync & 0x02) == 0) {
aoqi@0 2014 // The object has an existing monitor iff (mark & monitor_value) != 0.
aoqi@0 2015 andi_(temp, displaced_header, markOopDesc::monitor_value);
aoqi@0 2016 bne(CCR0, object_has_monitor);
aoqi@0 2017 }
aoqi@0 2018
aoqi@0 2019 // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
aoqi@0 2020 ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
aoqi@0 2021
aoqi@0 2022 // Load Compare Value application register.
aoqi@0 2023
aoqi@0 2024 // Initialize the box. (Must happen before we update the object mark!)
aoqi@0 2025 std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
aoqi@0 2026
aoqi@0 2027 // Must fence, otherwise, preceding store(s) may float below cmpxchg.
aoqi@0 2028 // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
aoqi@0 2029 // CmpxchgX sets cr_reg to cmpX(current, displaced).
aoqi@0 2030 membar(Assembler::StoreStore);
aoqi@0 2031 cmpxchgd(/*flag=*/flag,
aoqi@0 2032 /*current_value=*/current_header,
aoqi@0 2033 /*compare_value=*/displaced_header,
aoqi@0 2034 /*exchange_value=*/box,
aoqi@0 2035 /*where=*/oop,
aoqi@0 2036 MacroAssembler::MemBarAcq,
aoqi@0 2037 MacroAssembler::cmpxchgx_hint_acquire_lock(),
aoqi@0 2038 noreg,
aoqi@0 2039 &cas_failed);
aoqi@0 2040 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
aoqi@0 2041
aoqi@0 2042 // If the compare-and-exchange succeeded, then we found an unlocked
aoqi@0 2043 // object and we have now locked it.
aoqi@0 2044 b(cont);
aoqi@0 2045
aoqi@0 2046 bind(cas_failed);
aoqi@0 2047 // We did not see an unlocked object so try the fast recursive case.
aoqi@0 2048
aoqi@0 2049 // Check if the owner is self by comparing the value in the markOop of object
aoqi@0 2050 // (current_header) with the stack pointer.
aoqi@0 2051 sub(current_header, current_header, R1_SP);
aoqi@0 2052 load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
aoqi@0 2053 markOopDesc::lock_mask_in_place));
aoqi@0 2054
aoqi@0 2055 and_(R0/*==0?*/, current_header, temp);
aoqi@0 2056 // If the condition is true we are done (cont) and hence we can store 0 as the
aoqi@0 2057 // displaced header in the box, which indicates that it is a recursive lock.
aoqi@0 2058 mcrf(flag,CCR0);
aoqi@0 2059 std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
aoqi@0 2060
aoqi@0 2061 // Handle existing monitor.
aoqi@0 2062 if ((EmitSync & 0x02) == 0) {
aoqi@0 2063 b(cont);
aoqi@0 2064
aoqi@0 2065 bind(object_has_monitor);
aoqi@0 2066 // The object's monitor m is unlocked iff m->owner == NULL,
aoqi@0 2067 // otherwise m->owner may contain a thread or a stack address.
aoqi@0 2068 //
aoqi@0 2069 // Try to CAS m->owner from NULL to current thread.
aoqi@0 2070 addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
aoqi@0 2071 li(displaced_header, 0);
aoqi@0 2072 // CmpxchgX sets flag to cmpX(current, displaced).
aoqi@0 2073 cmpxchgd(/*flag=*/flag,
aoqi@0 2074 /*current_value=*/current_header,
aoqi@0 2075 /*compare_value=*/displaced_header,
aoqi@0 2076 /*exchange_value=*/R16_thread,
aoqi@0 2077 /*where=*/temp,
aoqi@0 2078 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
aoqi@0 2079 MacroAssembler::cmpxchgx_hint_acquire_lock());
aoqi@0 2080
aoqi@0 2081 // Store a non-null value into the box.
aoqi@0 2082 std(box, BasicLock::displaced_header_offset_in_bytes(), box);
aoqi@0 2083
aoqi@0 2084 # ifdef ASSERT
aoqi@0 2085 bne(flag, cont);
aoqi@0 2086 // We have acquired the monitor, check some invariants.
aoqi@0 2087 addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
aoqi@0 2088 // Invariant 1: _recursions should be 0.
aoqi@0 2089 //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
aoqi@0 2090 asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
aoqi@0 2091 "monitor->_recursions should be 0", -1);
aoqi@0 2092 // Invariant 2: OwnerIsThread shouldn't be 0.
aoqi@0 2093 //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
aoqi@0 2094 //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
aoqi@0 2095 // "monitor->OwnerIsThread shouldn't be 0", -1);
aoqi@0 2096 # endif
aoqi@0 2097 }
aoqi@0 2098
aoqi@0 2099 bind(cont);
aoqi@0 2100 // flag == EQ indicates success
aoqi@0 2101 // flag == NE indicates failure
aoqi@0 2102 }
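// Fast-lock summary (descriptive sketch of the path above):
//   mark = obj->mark();
//   if (mark & monitor_value)  ->  inflated: CAS monitor->owner from NULL to thread
//   else: box->displaced_header = mark | unlocked_value;
//         CAS obj->mark from (mark | unlocked_value) to box;
//         on CAS failure: recursive lock iff the observed mark is a stack address
//         within one page of SP with the lock bits clear; then box->displaced_header = 0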
aoqi@0 2103
aoqi@0 2104 void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
aoqi@0 2105 Register temp, Register displaced_header, Register current_header) {
aoqi@0 2106 assert_different_registers(oop, box, temp, displaced_header, current_header);
aoqi@0 2107 assert(flag != CCR0, "bad condition register");
aoqi@0 2108 Label cont;
aoqi@0 2109 Label object_has_monitor;
aoqi@0 2110
aoqi@0 2111 // Always do locking in runtime.
aoqi@0 2112 if (EmitSync & 0x01) {
aoqi@0 2113 cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
aoqi@0 2114 return;
aoqi@0 2115 }
aoqi@0 2116
aoqi@0 2117 if (UseBiasedLocking) {
aoqi@0 2118 biased_locking_exit(flag, oop, current_header, cont);
aoqi@0 2119 }
aoqi@0 2120
aoqi@0 2121 // Find the lock address and load the displaced header from the stack.
aoqi@0 2122 ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
aoqi@0 2123
aoqi@0 2124 // If the displaced header is 0, we have a recursive unlock.
aoqi@0 2125 cmpdi(flag, displaced_header, 0);
aoqi@0 2126 beq(flag, cont);
aoqi@0 2127
aoqi@0 2128 // Handle existing monitor.
aoqi@0 2129 if ((EmitSync & 0x02) == 0) {
aoqi@0 2130 // The object has an existing monitor iff (mark & monitor_value) != 0.
aoqi@0 2131 ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
aoqi@0 2132 andi(temp, current_header, markOopDesc::monitor_value);
aoqi@0 2133 cmpdi(flag, temp, 0);
aoqi@0 2134 bne(flag, object_has_monitor);
aoqi@0 2135 }
aoqi@0 2136
aoqi@0 2137
aoqi@0 2138 // Check if it is still a lightweight lock; this is true if we see
aoqi@0 2139 // the stack address of the basicLock in the markOop of the object.
aoqi@0 2140 // Cmpxchg sets flag to cmpd(current_header, box).
aoqi@0 2141 cmpxchgd(/*flag=*/flag,
aoqi@0 2142 /*current_value=*/current_header,
aoqi@0 2143 /*compare_value=*/box,
aoqi@0 2144 /*exchange_value=*/displaced_header,
aoqi@0 2145 /*where=*/oop,
aoqi@0 2146 MacroAssembler::MemBarRel,
aoqi@0 2147 MacroAssembler::cmpxchgx_hint_release_lock(),
aoqi@0 2148 noreg,
aoqi@0 2149 &cont);
aoqi@0 2150
aoqi@0 2151 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
aoqi@0 2152
aoqi@0 2153 // Handle existing monitor.
aoqi@0 2154 if ((EmitSync & 0x02) == 0) {
aoqi@0 2155 b(cont);
aoqi@0 2156
aoqi@0 2157 bind(object_has_monitor);
aoqi@0 2158 addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
aoqi@0 2159 ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
aoqi@0 2160 ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
aoqi@0 2161 xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
aoqi@0 2162 orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
aoqi@0 2163 cmpdi(flag, temp, 0);
aoqi@0 2164 bne(flag, cont);
aoqi@0 2165
aoqi@0 2166 ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
aoqi@0 2167 ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
aoqi@0 2168 orr(temp, temp, displaced_header); // Will be 0 if both are 0.
aoqi@0 2169 cmpdi(flag, temp, 0);
aoqi@0 2170 bne(flag, cont);
aoqi@0 2171 release();
aoqi@0 2172 std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
aoqi@0 2173 }
aoqi@0 2174
aoqi@0 2175 bind(cont);
aoqi@0 2176 // flag == EQ indicates success
aoqi@0 2177 // flag == NE indicates failure
aoqi@0 2178 }
aoqi@0 2179
aoqi@0 2180 // Write serialization page so VM thread can do a pseudo remote membar.
aoqi@0 2181 // We use the current thread pointer to calculate a thread specific
aoqi@0 2182 // offset to write to within the page. This minimizes bus traffic
aoqi@0 2183 // due to cache line collision.
aoqi@0 2184 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
aoqi@0 2185 srdi(tmp2, thread, os::get_serialize_page_shift_count());
aoqi@0 2186
aoqi@0 2187 int mask = os::vm_page_size() - sizeof(int);
aoqi@0 2188 if (Assembler::is_simm(mask, 16)) {
aoqi@0 2189 andi(tmp2, tmp2, mask);
aoqi@0 2190 } else {
aoqi@0 2191 lis(tmp1, (int)((signed short) (mask >> 16)));
aoqi@0 2192 ori(tmp1, tmp1, mask & 0x0000ffff);
aoqi@0 2193 andr(tmp2, tmp2, tmp1);
aoqi@0 2194 }
aoqi@0 2195
aoqi@0 2196 load_const(tmp1, (long) os::get_memory_serialize_page());
aoqi@0 2197 release();
aoqi@0 2198 stwx(R0, tmp1, tmp2);
aoqi@0 2199 }
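// Address written above (descriptive sketch):
//   offset = (thread >> os::get_serialize_page_shift_count())
//            & (os::vm_page_size() - sizeof(int));
//   store R0 to os::get_memory_serialize_page() + offset;
// so different threads tend to hit different words of the page, reducing
// cache line contention.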
aoqi@0 2200
aoqi@0 2201
aoqi@0 2202 // GC barrier helper macros
aoqi@0 2203
aoqi@0 2204 // Write the card table byte if needed.
aoqi@0 2205 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
aoqi@0 2206 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
aoqi@0 2207 assert(bs->kind() == BarrierSet::CardTableModRef ||
aoqi@0 2208 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
aoqi@0 2209 #ifdef ASSERT
aoqi@0 2210 cmpdi(CCR0, Rnew_val, 0);
aoqi@0 2211 asm_assert_ne("null oop not allowed", 0x321);
aoqi@0 2212 #endif
aoqi@0 2213 card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
aoqi@0 2214 }
aoqi@0 2215
aoqi@0 2216 // Write the card table byte.
aoqi@0 2217 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
aoqi@0 2218 assert_different_registers(Robj, Rtmp, R0);
aoqi@0 2219 load_const_optimized(Rtmp, (address)byte_map_base, R0);
aoqi@0 2220 srdi(Robj, Robj, CardTableModRefBS::card_shift);
aoqi@0 2221 li(R0, 0); // dirty
aoqi@0 2222 if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
aoqi@0 2223 stbx(R0, Rtmp, Robj);
aoqi@0 2224 }
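// Pseudo-code of the store above (sketch):
//   byte_map_base[(uintptr_t)obj >> CardTableModRefBS::card_shift] = 0;  // 0 == dirty_card_val()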
aoqi@0 2225
aoqi@0 2226 #if INCLUDE_ALL_GCS
aoqi@0 2227 // General G1 pre-barrier generator.
aoqi@0 2228 // Goal: record the previous value if it is not null.
aoqi@0 2229 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
aoqi@0 2230 Register Rtmp1, Register Rtmp2, bool needs_frame) {
aoqi@0 2231 Label runtime, filtered;
aoqi@0 2232
aoqi@0 2233 // Is marking active?
aoqi@0 2234 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
aoqi@0 2235 lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
aoqi@0 2236 } else {
aoqi@0 2237 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
aoqi@0 2238 lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
aoqi@0 2239 }
aoqi@0 2240 cmpdi(CCR0, Rtmp1, 0);
aoqi@0 2241 beq(CCR0, filtered);
aoqi@0 2242
aoqi@0 2243 // Do we need to load the previous value?
aoqi@0 2244 if (Robj != noreg) {
aoqi@0 2245 // Load the previous value...
aoqi@0 2246 if (UseCompressedOops) {
aoqi@0 2247 lwz(Rpre_val, offset, Robj);
aoqi@0 2248 } else {
aoqi@0 2249 ld(Rpre_val, offset, Robj);
aoqi@0 2250 }
aoqi@0 2251 // Previous value has been loaded into Rpre_val.
aoqi@0 2252 }
aoqi@0 2253 assert(Rpre_val != noreg, "must have a real register");
aoqi@0 2254
aoqi@0 2255 // Is the previous value null?
aoqi@0 2256 cmpdi(CCR0, Rpre_val, 0);
aoqi@0 2257 beq(CCR0, filtered);
aoqi@0 2258
aoqi@0 2259 if (Robj != noreg && UseCompressedOops) {
aoqi@0 2260 decode_heap_oop_not_null(Rpre_val);
aoqi@0 2261 }
aoqi@0 2262
aoqi@0 2263 // OK, it's not filtered, so we'll need to call enqueue. In the normal
aoqi@0 2264 // case, pre_val will be a scratch G-reg, but there are some cases in
aoqi@0 2265 // which it's an O-reg. In the first case, do a normal call. In the
aoqi@0 2266 // latter, do a save here and call the frameless version.
aoqi@0 2267
aoqi@0 2268 // Can we store original value in the thread's buffer?
aoqi@0 2269 // Is index == 0?
aoqi@0 2270 // (The index field is typed as size_t.)
aoqi@0 2271 const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
aoqi@0 2272
aoqi@0 2273 ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
aoqi@0 2274 cmpdi(CCR0, Rindex, 0);
aoqi@0 2275 beq(CCR0, runtime); // If index == 0, goto runtime.
aoqi@0 2276 ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
aoqi@0 2277
aoqi@0 2278 addi(Rindex, Rindex, -wordSize); // Decrement index.
aoqi@0 2279 std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
aoqi@0 2280
aoqi@0 2281 // Record the previous value.
aoqi@0 2282 stdx(Rpre_val, Rbuffer, Rindex);
aoqi@0 2283 b(filtered);
aoqi@0 2284
aoqi@0 2285 bind(runtime);
aoqi@0 2286
aoqi@0 2287 // The VM leaf call needs a frame (to save LR/CR and provide the C ABI area).
aoqi@0 2288 if (needs_frame) {
aoqi@0 2289 save_LR_CR(Rtmp1);
aoqi@0 2290 push_frame_reg_args(0, Rtmp2);
aoqi@0 2291 }
aoqi@0 2292
aoqi@0 2293 if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
aoqi@0 2294 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
aoqi@0 2295 if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
aoqi@0 2296
aoqi@0 2297 if (needs_frame) {
aoqi@0 2298 pop_frame();
aoqi@0 2299 restore_LR_CR(Rtmp1);
aoqi@0 2300 }
aoqi@0 2301
aoqi@0 2302 bind(filtered);
aoqi@0 2303 }
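// Pseudo-code of the SATB pre-barrier above (sketch):
//
//   if (thread's SATB queue is not active) return;
//   oop pre_val = (Robj != noreg) ? *(field at Robj + offset) : Rpre_val;
//   if (pre_val == NULL) return;
//   if (index != 0) { index -= wordSize; *(oop*)(buf + index) = pre_val; }  // enqueue
//   else            SharedRuntime::g1_wb_pre(pre_val, thread);              // slow path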
aoqi@0 2304
aoqi@0 2305 // General G1 post-barrier generator
aoqi@0 2306 // Store cross-region card.
aoqi@0 2307 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
aoqi@0 2308 Label runtime, filtered_int;
aoqi@0 2309 Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
aoqi@0 2310 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
aoqi@0 2311
aoqi@0 2312 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
aoqi@0 2313 assert(bs->kind() == BarrierSet::G1SATBCT ||
aoqi@0 2314 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
aoqi@0 2315
aoqi@0 2316 // Does store cross heap regions?
aoqi@0 2317 if (G1RSBarrierRegionFilter) {
aoqi@0 2318 xorr(Rtmp1, Rstore_addr, Rnew_val);
aoqi@0 2319 srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
aoqi@0 2320 beq(CCR0, filtered);
aoqi@0 2321 }
aoqi@0 2322
aoqi@0 2323 // Crosses regions, storing NULL?
aoqi@0 2324 #ifdef ASSERT
aoqi@0 2325 cmpdi(CCR0, Rnew_val, 0);
aoqi@0 2326 asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
aoqi@0 2327 //beq(CCR0, filtered);
aoqi@0 2328 #endif
aoqi@0 2329
aoqi@0 2330 // Storing region crossing non-NULL, is card already dirty?
aoqi@0 2331 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
aoqi@0 2332 const Register Rcard_addr = Rtmp1;
aoqi@0 2333 Register Rbase = Rtmp2;
aoqi@0 2334 load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
aoqi@0 2335
aoqi@0 2336 srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
aoqi@0 2337
aoqi@0 2338 // Get the address of the card.
aoqi@0 2339 lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
aoqi@0 2340 cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
aoqi@0 2341 beq(CCR0, filtered);
aoqi@0 2342
aoqi@0 2343 membar(Assembler::StoreLoad);
aoqi@0 2344 lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
aoqi@0 2345 cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
aoqi@0 2346 beq(CCR0, filtered);
aoqi@0 2347
aoqi@0 2348 // Storing a region crossing, non-NULL oop, card is clean.
aoqi@0 2349 // Dirty card and log.
aoqi@0 2350 li(Rtmp3, CardTableModRefBS::dirty_card_val());
aoqi@0 2351 //release(); // G1: oops are allowed to get visible after dirty marking.
aoqi@0 2352 stbx(Rtmp3, Rbase, Rcard_addr);
aoqi@0 2353
aoqi@0 2354 add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
aoqi@0 2355 Rbase = noreg; // end of lifetime
aoqi@0 2356
aoqi@0 2357 const Register Rqueue_index = Rtmp2,
aoqi@0 2358 Rqueue_buf = Rtmp3;
aoqi@0 2359 ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
aoqi@0 2360 cmpdi(CCR0, Rqueue_index, 0);
aoqi@0 2361 beq(CCR0, runtime); // index == 0 then jump to runtime
aoqi@0 2362 ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
aoqi@0 2363
aoqi@0 2364 addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
aoqi@0 2365 std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
aoqi@0 2366
aoqi@0 2367 stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
aoqi@0 2368 b(filtered);
aoqi@0 2369
aoqi@0 2370 bind(runtime);
aoqi@0 2371
aoqi@0 2372 // Save the live input values.
aoqi@0 2373 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
aoqi@0 2374
aoqi@0 2375 bind(filtered_int);
aoqi@0 2376 }
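// Pseudo-code of the post-barrier above (sketch):
//
//   if (((store_addr ^ new_val) >> HeapRegion::LogOfHRGrainBytes) == 0) return;  // same region
//   jbyte* card = bs->byte_map_base + ((uintptr_t)store_addr >> card_shift);
//   if (*card == g1_young_card_val()) return;
//   StoreLoad barrier;
//   if (*card == dirty_card_val()) return;
//   *card = dirty_card_val();
//   if (dirty card queue index != 0) enqueue the card address;
//   else                             SharedRuntime::g1_wb_post(card, thread);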
aoqi@0 2377 #endif // INCLUDE_ALL_GCS
aoqi@0 2378
aoqi@0 2379 // Values for last_Java_pc, and last_Java_sp must comply to the rules
goetz@7222 2380 // in frame_ppc.hpp.
aoqi@0 2381 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
aoqi@0 2382 // Always set last_Java_pc and flags first because once last_Java_sp
aoqi@0 2383 // is visible, has_last_Java_frame is true and users will look at the
aoqi@0 2384 // rest of the fields. (Note: flags should always be zero before we
aoqi@0 2385 // get here, so they don't need to be set.)
aoqi@0 2386
aoqi@0 2387 // Verify that last_Java_pc was zeroed on return to Java
aoqi@0 2388 asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
aoqi@0 2389 "last_Java_pc not zeroed before leaving Java", 0x200);
aoqi@0 2390
aoqi@0 2391 // When returning from calling out from Java mode the frame anchor's
aoqi@0 2392 // last_Java_pc will always be set to NULL. It is set here so that
aoqi@0 2393 // if we are doing a call to native (not VM) that we capture the
aoqi@0 2394 // known pc and don't have to rely on the native call having a
aoqi@0 2395 // standard frame linkage where we can find the pc.
aoqi@0 2396 if (last_Java_pc != noreg)
aoqi@0 2397 std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
aoqi@0 2398
aoqi@0 2399 // Set last_Java_sp last.
aoqi@0 2400 std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
aoqi@0 2401 }
aoqi@0 2402
aoqi@0 2403 void MacroAssembler::reset_last_Java_frame(void) {
aoqi@0 2404 asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
aoqi@0 2405 R16_thread, "SP was not set, still zero", 0x202);
aoqi@0 2406
aoqi@0 2407 BLOCK_COMMENT("reset_last_Java_frame {");
aoqi@0 2408 li(R0, 0);
aoqi@0 2409
aoqi@0 2410 // _last_Java_sp = 0
aoqi@0 2411 std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
aoqi@0 2412
aoqi@0 2413 // _last_Java_pc = 0
aoqi@0 2414 std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
aoqi@0 2415 BLOCK_COMMENT("} reset_last_Java_frame");
aoqi@0 2416 }
aoqi@0 2417
aoqi@0 2418 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
aoqi@0 2419 assert_different_registers(sp, tmp1);
aoqi@0 2420
aoqi@0 2421 // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
aoqi@0 2422 // TOP_IJAVA_FRAME_ABI.
aoqi@0 2423 // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
aoqi@0 2424 #ifdef CC_INTERP
aoqi@0 2425 ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
aoqi@0 2426 #else
aoqi@0 2427 address entry = pc();
aoqi@0 2428 load_const_optimized(tmp1, entry);
aoqi@0 2429 #endif
aoqi@0 2430
aoqi@0 2431 set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
aoqi@0 2432 }
aoqi@0 2433
aoqi@0 2434 void MacroAssembler::get_vm_result(Register oop_result) {
aoqi@0 2435 // Read:
aoqi@0 2436 // R16_thread
aoqi@0 2437 // R16_thread->in_bytes(JavaThread::vm_result_offset())
aoqi@0 2438 //
aoqi@0 2439 // Updated:
aoqi@0 2440 // oop_result
aoqi@0 2441 // R16_thread->in_bytes(JavaThread::vm_result_offset())
aoqi@0 2442
aoqi@0 2443 ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
aoqi@0 2444 li(R0, 0);
aoqi@0 2445 std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
aoqi@0 2446
aoqi@0 2447 verify_oop(oop_result);
aoqi@0 2448 }
aoqi@0 2449
aoqi@0 2450 void MacroAssembler::get_vm_result_2(Register metadata_result) {
aoqi@0 2451 // Read:
aoqi@0 2452 // R16_thread
aoqi@0 2453 // R16_thread->in_bytes(JavaThread::vm_result_2_offset())
aoqi@0 2454 //
aoqi@0 2455 // Updated:
aoqi@0 2456 // metadata_result
aoqi@0 2457 // R16_thread->in_bytes(JavaThread::vm_result_2_offset())
aoqi@0 2458
aoqi@0 2459 ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
aoqi@0 2460 li(R0, 0);
aoqi@0 2461 std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
aoqi@0 2462 }
aoqi@0 2463
aoqi@0 2464
aoqi@0 2465 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
aoqi@0 2466 Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
aoqi@0 2467 if (Universe::narrow_klass_base() != 0) {
aoqi@0 2468 // Use dst as temp if it is free.
aoqi@0 2469 load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
aoqi@0 2470 sub(dst, current, R0);
aoqi@0 2471 current = dst;
aoqi@0 2472 }
aoqi@0 2473 if (Universe::narrow_klass_shift() != 0) {
aoqi@0 2474 srdi(dst, current, Universe::narrow_klass_shift());
aoqi@0 2475 current = dst;
aoqi@0 2476 }
aoqi@0 2477 mr_if_needed(dst, current); // Move may be required.
aoqi@0 2478 }
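// Encoding performed above (descriptive sketch):
//   narrowKlass = (klass - Universe::narrow_klass_base()) >> Universe::narrow_klass_shift()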
aoqi@0 2479
aoqi@0 2480 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
aoqi@0 2481 if (UseCompressedClassPointers) {
aoqi@0 2482 encode_klass_not_null(ck, klass);
aoqi@0 2483 stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
aoqi@0 2484 } else {
aoqi@0 2485 std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
aoqi@0 2486 }
aoqi@0 2487 }
aoqi@0 2488
aoqi@0 2489 void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
aoqi@0 2490 if (UseCompressedClassPointers) {
aoqi@0 2491 if (val == noreg) {
aoqi@0 2492 val = R0;
aoqi@0 2493 li(val, 0);
aoqi@0 2494 }
aoqi@0 2495 stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
aoqi@0 2496 }
aoqi@0 2497 }
aoqi@0 2498
aoqi@0 2499 int MacroAssembler::instr_size_for_decode_klass_not_null() {
aoqi@0 2500 if (!UseCompressedClassPointers) return 0;
aoqi@0 2501 int num_instrs = 1; // shift or move
aoqi@0 2502 if (Universe::narrow_klass_base() != 0) num_instrs = 7; // shift + load const + add
aoqi@0 2503 return num_instrs * BytesPerInstWord;
aoqi@0 2504 }
aoqi@0 2505
aoqi@0 2506 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
goetz@7222 2507 assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
aoqi@0 2508 if (src == noreg) src = dst;
aoqi@0 2509 Register shifted_src = src;
aoqi@0 2510 if (Universe::narrow_klass_shift() != 0 ||
aoqi@0 2511 Universe::narrow_klass_base() == 0 && src != dst) { // Move required.
aoqi@0 2512 shifted_src = dst;
aoqi@0 2513 sldi(shifted_src, src, Universe::narrow_klass_shift());
aoqi@0 2514 }
aoqi@0 2515 if (Universe::narrow_klass_base() != 0) {
aoqi@0 2516 load_const(R0, Universe::narrow_klass_base());
aoqi@0 2517 add(dst, shifted_src, R0);
aoqi@0 2518 }
aoqi@0 2519 }
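// Reference model of the decode step (sketch only), mirroring encode_klass_not_null
// above: the narrow value is shifted back and the base is re-added.
//
//   static uintptr_t decode_klass_ref(uint32_t narrow_klass, uintptr_t base, int shift) {
//     return ((uintptr_t)narrow_klass << shift) + base;
//   }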
aoqi@0 2520
aoqi@0 2521 void MacroAssembler::load_klass(Register dst, Register src) {
aoqi@0 2522 if (UseCompressedClassPointers) {
aoqi@0 2523 lwz(dst, oopDesc::klass_offset_in_bytes(), src);
aoqi@0 2524 // Attention: no null check here!
aoqi@0 2525 decode_klass_not_null(dst, dst);
aoqi@0 2526 } else {
aoqi@0 2527 ld(dst, oopDesc::klass_offset_in_bytes(), src);
aoqi@0 2528 }
aoqi@0 2529 }
aoqi@0 2530
aoqi@0 2531 void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
aoqi@0 2532 if (!os::zero_page_read_protected()) {
aoqi@0 2533 if (TrapBasedNullChecks) {
aoqi@0 2534 trap_null_check(src);
aoqi@0 2535 }
aoqi@0 2536 }
aoqi@0 2537 load_klass(dst, src);
aoqi@0 2538 }
aoqi@0 2539
aoqi@0 2540 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
aoqi@0 2541 if (Universe::heap() != NULL) {
goetz@7222 2542 load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
aoqi@0 2543 } else {
goetz@7222 2544 // Heap not yet allocated. Load indirectly.
goetz@7222 2545 int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
goetz@7222 2546 ld(R30, simm16_offset, R30);
aoqi@0 2547 }
aoqi@0 2548 }
aoqi@0 2549
aoqi@0 2550 // Clear Array
aoqi@0 2551 // Kills both input registers. tmp == R0 is allowed.
aoqi@0 2552 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
aoqi@0 2553 // Procedure for large arrays (uses data cache block zero instruction).
aoqi@0 2554 Label startloop, fast, fastloop, small_rest, restloop, done;
aoqi@0 2555 const int cl_size = VM_Version::get_cache_line_size(),
aoqi@0 2556 cl_dwords = cl_size>>3,
aoqi@0 2557 cl_dw_addr_bits = exact_log2(cl_dwords),
aoqi@0 2558 dcbz_min = 1; // Min count of dcbz executions, needs to be >0.
aoqi@0 2559
aoqi@0 2560 //2:
aoqi@0 2561 cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
aoqi@0 2562 blt(CCR1, small_rest); // Too small.
aoqi@0 2563 rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
aoqi@0 2564 beq(CCR0, fast); // Already 128byte aligned.
aoqi@0 2565
aoqi@0 2566 subfic(tmp, tmp, cl_dwords);
aoqi@0 2567 mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
aoqi@0 2568 subf(cnt_dwords, tmp, cnt_dwords); // rest.
aoqi@0 2569 li(tmp, 0);
aoqi@0 2570 //10:
aoqi@0 2571 bind(startloop); // Clear at the beginning to reach 128byte boundary.
aoqi@0 2572 std(tmp, 0, base_ptr); // Clear 8byte aligned block.
aoqi@0 2573 addi(base_ptr, base_ptr, 8);
aoqi@0 2574 bdnz(startloop);
aoqi@0 2575 //13:
aoqi@0 2576 bind(fast); // Clear 128byte blocks.
aoqi@0 2577 srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0).
aoqi@0 2578 andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
aoqi@0 2579 mtctr(tmp); // Load counter.
aoqi@0 2580 //16:
aoqi@0 2581 bind(fastloop);
aoqi@0 2582 dcbz(base_ptr); // Clear 128byte aligned block.
aoqi@0 2583 addi(base_ptr, base_ptr, cl_size);
aoqi@0 2584 bdnz(fastloop);
aoqi@0 2585 if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
aoqi@0 2586 //20:
aoqi@0 2587 bind(small_rest);
aoqi@0 2588 cmpdi(CCR0, cnt_dwords, 0); // size 0?
aoqi@0 2589 beq(CCR0, done); // rest == 0
aoqi@0 2590 li(tmp, 0);
aoqi@0 2591 mtctr(cnt_dwords); // Load counter.
aoqi@0 2592 //24:
aoqi@0 2593 bind(restloop); // Clear rest.
aoqi@0 2594 std(tmp, 0, base_ptr); // Clear 8byte aligned block.
aoqi@0 2595 addi(base_ptr, base_ptr, 8);
aoqi@0 2596 bdnz(restloop);
aoqi@0 2597 //27:
aoqi@0 2598 bind(done);
aoqi@0 2599 }
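// C-level reference model of the algorithm above (illustrative only; assumes a
// 128-byte cache line and an 8-byte aligned base_ptr): clear dwords up to the
// first cache line boundary, clear whole lines (dcbz), then clear the rest.
//
//   static void clear_dwords_ref(uint64_t* p, size_t cnt_dwords) {
//     const size_t cl_bytes = 128, cl_dwords = cl_bytes / 8;
//     while (cnt_dwords > 0 && ((uintptr_t)p & (cl_bytes - 1)) != 0) { *p++ = 0; --cnt_dwords; }
//     while (cnt_dwords >= cl_dwords) {                  // dcbz clears a whole cache line.
//       for (size_t i = 0; i < cl_dwords; ++i) p[i] = 0;
//       p += cl_dwords; cnt_dwords -= cl_dwords;
//     }
//     while (cnt_dwords-- > 0) *p++ = 0;
//   }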
aoqi@0 2600
aoqi@0 2601 /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
aoqi@0 2602
aoqi@0 2603 // Search for a single jchar in a jchar[].
aoqi@0 2604 //
aoqi@0 2605 // Assumes that result differs from all other registers.
aoqi@0 2606 //
aoqi@0 2607 // Haystack, needle are the addresses of jchar-arrays.
aoqi@0 2608 // NeedleChar is needle[0] if it is known at compile time.
aoqi@0 2609 // Haycnt is the length of the haystack. We assume haycnt >=1.
aoqi@0 2610 //
aoqi@0 2611 // Preserves haystack, haycnt, kills all other registers.
aoqi@0 2612 //
aoqi@0 2613 // If needle == R0, we search for the constant needleChar.
aoqi@0 2614 void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
aoqi@0 2615 Register needle, jchar needleChar,
aoqi@0 2616 Register tmp1, Register tmp2) {
aoqi@0 2617
aoqi@0 2618 assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
aoqi@0 2619
aoqi@0 2620 Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
aoqi@0 2621 Register needle0 = needle, // Contains needle[0].
aoqi@0 2622 addr = tmp1,
aoqi@0 2623 ch1 = tmp2,
aoqi@0 2624 ch2 = R0;
aoqi@0 2625
aoqi@0 2626 //2 (variable) or 3 (const):
aoqi@0 2627 if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
aoqi@0 2628 dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
aoqi@0 2629
aoqi@0 2630 srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
aoqi@0 2631 mr(addr, haystack);
aoqi@0 2632 beq(CCR0, L_FinalCheck);
aoqi@0 2633 mtctr(tmp2); // Move to count register.
aoqi@0 2634 //8:
aoqi@0 2635 bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
aoqi@0 2636 lhz(ch1, 0, addr); // Load characters from haystack.
aoqi@0 2637 lhz(ch2, 2, addr);
aoqi@0 2638 (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
aoqi@0 2639 (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
aoqi@0 2640 beq(CCR0, L_Found1); // Did we find the needle?
aoqi@0 2641 beq(CCR1, L_Found2);
aoqi@0 2642 addi(addr, addr, 4);
aoqi@0 2643 bdnz(L_InnerLoop);
aoqi@0 2644 //16:
aoqi@0 2645 bind(L_FinalCheck);
aoqi@0 2646 andi_(R0, haycnt, 1);
aoqi@0 2647 beq(CCR0, L_NotFound);
aoqi@0 2648 lhz(ch1, 0, addr); // One position left at which we have to compare.
aoqi@0 2649 (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
aoqi@0 2650 beq(CCR1, L_Found3);
aoqi@0 2651 //21:
aoqi@0 2652 bind(L_NotFound);
aoqi@0 2653 li(result, -1); // Not found.
aoqi@0 2654 b(L_End);
aoqi@0 2655
aoqi@0 2656 bind(L_Found2);
aoqi@0 2657 addi(addr, addr, 2);
aoqi@0 2658 //24:
aoqi@0 2659 bind(L_Found1);
aoqi@0 2660 bind(L_Found3); // Return index ...
aoqi@0 2661 subf(addr, haystack, addr); // relative to haystack,
aoqi@0 2662 srdi(result, addr, 1); // in characters.
aoqi@0 2663 bind(L_End);
aoqi@0 2664 }
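// Reference semantics of the single-character search above (sketch, not generated
// code); haycnt >= 1 is assumed as stated in the header comment.
//
//   static int string_indexof_1_ref(const jchar* haystack, int haycnt, jchar needle_char) {
//     for (int i = 0; i < haycnt; i++) {
//       if (haystack[i] == needle_char) return i;  // Index in characters.
//     }
//     return -1;                                   // Not found.
//   }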
aoqi@0 2665
aoqi@0 2666
aoqi@0 2667 // Implementation of IndexOf for jchar arrays.
aoqi@0 2668 //
aoqi@0 2669 // The lengths of haystack and needle are not constant, i.e. they are passed in registers.
aoqi@0 2670 //
aoqi@0 2671 // Preserves registers haystack, needle.
aoqi@0 2672 // Kills registers haycnt, needlecnt.
aoqi@0 2673 // Assumes that result differs from all other registers.
aoqi@0 2674 // Haystack, needle are the addresses of jchar-arrays.
aoqi@0 2675 // Haycnt, needlecnt are the lengths of them, respectively.
aoqi@0 2676 //
aoqi@0 2677 // Needlecntval must be zero or a 15-bit unsigned immediate greater than 1.
aoqi@0 2678 void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
aoqi@0 2679 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
aoqi@0 2680 Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
aoqi@0 2681
aoqi@0 2682 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
aoqi@0 2683 Label L_TooShort, L_Found, L_NotFound, L_End;
aoqi@0 2684 Register last_addr = haycnt, // Kill haycnt at the beginning.
aoqi@0 2685 addr = tmp1,
aoqi@0 2686 n_start = tmp2,
aoqi@0 2687 ch1 = tmp3,
aoqi@0 2688 ch2 = R0;
aoqi@0 2689
aoqi@0 2690 // **************************************************************************************************
aoqi@0 2691 // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
aoqi@0 2692 // **************************************************************************************************
aoqi@0 2693
aoqi@0 2694 //1 (variable) or 3 (const):
aoqi@0 2695 dcbtct(needle, 0x00); // Indicate R/O access to needle.
aoqi@0 2696 dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
aoqi@0 2697
aoqi@0 2698 // Compute last haystack addr to use if no match gets found.
aoqi@0 2699 if (needlecntval == 0) { // variable needlecnt
aoqi@0 2700 //3:
aoqi@0 2701 subf(ch1, needlecnt, haycnt); // Last character index to compare is haycnt-needlecnt.
aoqi@0 2702 addi(addr, haystack, -2); // Accesses use pre-increment.
aoqi@0 2703 cmpwi(CCR6, needlecnt, 2);
aoqi@0 2704 blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
aoqi@0 2705 slwi(ch1, ch1, 1); // Scale to number of bytes.
aoqi@0 2706 lwz(n_start, 0, needle); // Load first 2 characters of needle.
aoqi@0 2707 add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
aoqi@0 2708 addi(needlecnt, needlecnt, -2); // Rest of needle.
aoqi@0 2709 } else { // constant needlecnt
aoqi@0 2710 guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
aoqi@0 2711 assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
aoqi@0 2712 //5:
aoqi@0 2713 addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
aoqi@0 2714 lwz(n_start, 0, needle); // Load first 2 characters of needle.
aoqi@0 2715 addi(addr, haystack, -2); // Accesses use pre-increment.
aoqi@0 2716 slwi(ch1, ch1, 1); // Scale to number of bytes.
aoqi@0 2717 add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
aoqi@0 2718 li(needlecnt, needlecntval-2); // Rest of needle.
aoqi@0 2719 }
aoqi@0 2720
aoqi@0 2721 // Main Loop (now we have at least 3 characters).
aoqi@0 2722 //11:
aoqi@0 2723 Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
aoqi@0 2724 bind(L_OuterLoop); // Search for 1st 2 characters.
aoqi@0 2725 Register addr_diff = tmp4;
aoqi@0 2726 subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
aoqi@0 2727 addi(addr, addr, 2); // This is the new address we want to use for comparing.
aoqi@0 2728 srdi_(ch2, addr_diff, 2);
aoqi@0 2729 beq(CCR0, L_FinalCheck); // 2 characters left?
aoqi@0 2730 mtctr(ch2); // addr_diff/4
aoqi@0 2731 //16:
aoqi@0 2732 bind(L_InnerLoop); // Main work horse (2x unrolled search loop)
aoqi@0 2733 lwz(ch1, 0, addr); // Load 2 characters of haystack (ignore alignment).
aoqi@0 2734 lwz(ch2, 2, addr);
aoqi@0 2735 cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
aoqi@0 2736 cmpw(CCR1, ch2, n_start);
aoqi@0 2737 beq(CCR0, L_Comp1); // Did we find the needle start?
aoqi@0 2738 beq(CCR1, L_Comp2);
aoqi@0 2739 addi(addr, addr, 4);
aoqi@0 2740 bdnz(L_InnerLoop);
aoqi@0 2741 //24:
aoqi@0 2742 bind(L_FinalCheck);
aoqi@0 2743 rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
aoqi@0 2744 beq(CCR0, L_NotFound);
aoqi@0 2745 lwz(ch1, 0, addr); // One position left at which we have to compare.
aoqi@0 2746 cmpw(CCR1, ch1, n_start);
aoqi@0 2747 beq(CCR1, L_Comp3);
aoqi@0 2748 //29:
aoqi@0 2749 bind(L_NotFound);
aoqi@0 2750 li(result, -1); // not found
aoqi@0 2751 b(L_End);
aoqi@0 2752
aoqi@0 2753
aoqi@0 2754 // **************************************************************************************************
aoqi@0 2755 // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
aoqi@0 2756 // **************************************************************************************************
aoqi@0 2757 //31:
aoqi@0 2758 if ((needlecntval >> 1) != 1) { // Skipped for const needlecnt 2 or 3 to reduce code size.
aoqi@0 2759 int nopcnt = 5;
aoqi@0 2760 if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
aoqi@0 2761 if (needlecntval == 0) { // We have to handle these cases separately.
aoqi@0 2762 Label L_OneCharLoop;
aoqi@0 2763 bind(L_TooShort);
aoqi@0 2764 mtctr(haycnt);
aoqi@0 2765 lhz(n_start, 0, needle); // First character of needle
aoqi@0 2766 bind(L_OneCharLoop);
aoqi@0 2767 lhzu(ch1, 2, addr);
aoqi@0 2768 cmpw(CCR1, ch1, n_start);
aoqi@0 2769 beq(CCR1, L_Found); // Did we find the one character needle?
aoqi@0 2770 bdnz(L_OneCharLoop);
aoqi@0 2771 li(result, -1); // Not found.
aoqi@0 2772 b(L_End);
aoqi@0 2773 } // 8 instructions, so no impact on alignment.
aoqi@0 2774 for (int x = 0; x < nopcnt; ++x) nop();
aoqi@0 2775 }
aoqi@0 2776
aoqi@0 2777 // **************************************************************************************************
aoqi@0 2778 // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
aoqi@0 2779 // **************************************************************************************************
aoqi@0 2780
aoqi@0 2781 // Compare the rest
aoqi@0 2782 //36 if needlecntval==0, else 37:
aoqi@0 2783 bind(L_Comp2);
aoqi@0 2784 addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
aoqi@0 2785 bind(L_Comp1); // Addr points to possible needle start.
aoqi@0 2786 bind(L_Comp3); // Could have created a copy and used a different return address, but we save code size here.
aoqi@0 2787 if (needlecntval != 2) { // Const needlecnt==2?
aoqi@0 2788 if (needlecntval != 3) {
aoqi@0 2789 if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
aoqi@0 2790 Register ind_reg = tmp4;
aoqi@0 2791 li(ind_reg, 2*2); // First 2 characters are already compared, use index 2.
aoqi@0 2792 mtctr(needlecnt); // Decremented by 2, still > 0.
aoqi@0 2793 //40:
aoqi@0 2794 Label L_CompLoop;
aoqi@0 2795 bind(L_CompLoop);
aoqi@0 2796 lhzx(ch2, needle, ind_reg);
aoqi@0 2797 lhzx(ch1, addr, ind_reg);
aoqi@0 2798 cmpw(CCR1, ch1, ch2);
aoqi@0 2799 bne(CCR1, L_OuterLoop);
aoqi@0 2800 addi(ind_reg, ind_reg, 2);
aoqi@0 2801 bdnz(L_CompLoop);
aoqi@0 2802 } else { // No loop required if there's only one needle character left.
aoqi@0 2803 lhz(ch2, 2*2, needle);
aoqi@0 2804 lhz(ch1, 2*2, addr);
aoqi@0 2805 cmpw(CCR1, ch1, ch2);
aoqi@0 2806 bne(CCR1, L_OuterLoop);
aoqi@0 2807 }
aoqi@0 2808 }
aoqi@0 2809 // Return index ...
aoqi@0 2810 //46:
aoqi@0 2811 bind(L_Found);
aoqi@0 2812 subf(addr, haystack, addr); // relative to haystack, ...
aoqi@0 2813 srdi(result, addr, 1); // in characters.
aoqi@0 2814 //48:
aoqi@0 2815 bind(L_End);
aoqi@0 2816 }
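// Reference semantics of the IndexOf expansion above (sketch only; the generated
// code additionally requires 0 < needlecnt <= haycnt as noted in the prologue).
//
//   static int string_indexof_ref(const jchar* haystack, int haycnt,
//                                 const jchar* needle, int needlecnt) {
//     for (int i = 0; i + needlecnt <= haycnt; i++) {
//       int j = 0;
//       while (j < needlecnt && haystack[i + j] == needle[j]) j++;
//       if (j == needlecnt) return i;  // Match starts at character index i.
//     }
//     return -1;                       // Not found.
//   }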
aoqi@0 2817
aoqi@0 2818 // Implementation of Compare for jchar arrays.
aoqi@0 2819 //
aoqi@0 2820 // Kills the registers str1, str2, cnt1, cnt2.
aoqi@0 2821 // Kills cr0, ctr.
aoqi@0 2822 // Assumes that result differs from the input registers.
aoqi@0 2823 void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
aoqi@0 2824 Register result_reg, Register tmp_reg) {
aoqi@0 2825 assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
aoqi@0 2826
aoqi@0 2827 Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
aoqi@0 2828 Register cnt_diff = R0,
aoqi@0 2829 limit_reg = cnt1_reg,
aoqi@0 2830 chr1_reg = result_reg,
aoqi@0 2831 chr2_reg = cnt2_reg,
aoqi@0 2832 addr_diff = str2_reg;
aoqi@0 2833
aoqi@0 2834 // Offset 0 should be 32 byte aligned.
aoqi@0 2835 //-4:
aoqi@0 2836 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
aoqi@0 2837 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
aoqi@0 2838 //-2:
aoqi@0 2839 // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
aoqi@0 2840 subf(result_reg, cnt2_reg, cnt1_reg); // difference between cnt1/2
aoqi@0 2841 subf_(addr_diff, str1_reg, str2_reg); // Address difference; also detects aliasing (same array).
aoqi@0 2842 beq(CCR0, Ldone); // Return length difference if both strings are at the same address.
aoqi@0 2843 srawi(limit_reg, result_reg, 31); // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
aoqi@0 2844 mr(cnt_diff, result_reg);
aoqi@0 2845 andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
aoqi@0 2846 add_(limit_reg, cnt2_reg, limit_reg); // min(cnt1, cnt2)==0?
aoqi@0 2847 beq(CCR0, Ldone); // return cnt difference if one has 0 length
aoqi@0 2848
aoqi@0 2849 lhz(chr1_reg, 0, str1_reg); // optional: early out if first characters mismatch
aoqi@0 2850 lhzx(chr2_reg, str1_reg, addr_diff); // optional: early out if first characters mismatch
aoqi@0 2851 addi(tmp_reg, limit_reg, -1); // min(cnt1, cnt2)-1
aoqi@0 2852 subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
aoqi@0 2853 bne(CCR0, Ldone); // optional: early out if first characters mismatch
aoqi@0 2854
aoqi@0 2855 // Set loop counter by scaling down tmp_reg
aoqi@0 2856 srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
aoqi@0 2857 ble(CCR0, Lslow_case); // need >4 characters for fast loop
aoqi@0 2858 andi(limit_reg, tmp_reg, 4-1); // remaining characters
aoqi@0 2859
aoqi@0 2860 // Adapt str1_reg str2_reg for the first loop iteration
aoqi@0 2861 mtctr(chr2_reg); // (min(cnt1, cnt2)-1)/4
aoqi@0 2862 addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
aoqi@0 2863 //16:
aoqi@0 2864 // Compare the rest of the characters
aoqi@0 2865 bind(Lfast_loop);
aoqi@0 2866 ld(chr1_reg, 0, str1_reg);
aoqi@0 2867 ldx(chr2_reg, str1_reg, addr_diff);
aoqi@0 2868 cmpd(CCR0, chr2_reg, chr1_reg);
aoqi@0 2869 bne(CCR0, Lslow_case); // return chr1_reg
aoqi@0 2870 addi(str1_reg, str1_reg, 4*2);
aoqi@0 2871 bdnz(Lfast_loop);
aoqi@0 2872 addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
aoqi@0 2873 //23:
aoqi@0 2874 bind(Lslow_case);
aoqi@0 2875 mtctr(limit_reg);
aoqi@0 2876 //24:
aoqi@0 2877 bind(Lslow_loop);
aoqi@0 2878 lhz(chr1_reg, 0, str1_reg);
aoqi@0 2879 lhzx(chr2_reg, str1_reg, addr_diff);
aoqi@0 2880 subf_(result_reg, chr2_reg, chr1_reg);
aoqi@0 2881 bne(CCR0, Ldone); // return chr1_reg
aoqi@0 2882 addi(str1_reg, str1_reg, 1*2);
aoqi@0 2883 bdnz(Lslow_loop);
aoqi@0 2884 //30:
aoqi@0 2885 // If strings are equal up to min length, return the length difference.
aoqi@0 2886 mr(result_reg, cnt_diff);
aoqi@0 2887 nop(); // alignment
aoqi@0 2888 //32:
aoqi@0 2889 // Otherwise, return the difference between the first mismatched chars.
aoqi@0 2890 bind(Ldone);
aoqi@0 2891 }
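// Reference semantics (sketch): compare up to min(cnt1, cnt2) characters and
// return the first character difference, otherwise the length difference.
//
//   static int string_compare_ref(const jchar* s1, int cnt1, const jchar* s2, int cnt2) {
//     int min_cnt = (cnt1 < cnt2) ? cnt1 : cnt2;
//     for (int i = 0; i < min_cnt; i++) {
//       int diff = (int)s1[i] - (int)s2[i];
//       if (diff != 0) return diff;
//     }
//     return cnt1 - cnt2;
//   }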
aoqi@0 2892
aoqi@0 2893
aoqi@0 2894 // Compare char[] arrays.
aoqi@0 2895 //
aoqi@0 2896 // str1_reg USE only
aoqi@0 2897 // str2_reg USE only
aoqi@0 2898 // cnt_reg USE_DEF, due to tmp reg shortage
aoqi@0 2899 // result_reg DEF only, might compromise USE only registers
aoqi@0 2900 void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
aoqi@0 2901 Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
aoqi@0 2902 Register tmp5_reg) {
aoqi@0 2903
aoqi@0 2904 // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
aoqi@0 2905 assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
aoqi@0 2906 assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
aoqi@0 2907
aoqi@0 2908 // Offset 0 should be 32 byte aligned.
aoqi@0 2909 Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
aoqi@0 2910 Register index_reg = tmp5_reg;
aoqi@0 2911 Register cbc_iter = tmp4_reg;
aoqi@0 2912
aoqi@0 2913 //-1:
aoqi@0 2914 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
aoqi@0 2915 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
aoqi@0 2916 //1:
aoqi@0 2917 andi(cbc_iter, cnt_reg, 4-1); // Remaining characters after the 4-characters-per-iteration main loop.
aoqi@0 2918 li(index_reg, 0); // init
aoqi@0 2919 li(result_reg, 0); // assume false
aoqi@0 2920 srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
aoqi@0 2921
aoqi@0 2922 cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0)
aoqi@0 2923 beq(CCR0, Linit_cbc); // too short
aoqi@0 2924 mtctr(tmp2_reg);
aoqi@0 2925 //8:
aoqi@0 2926 bind(Lloop);
aoqi@0 2927 ldx(tmp1_reg, str1_reg, index_reg);
aoqi@0 2928 ldx(tmp2_reg, str2_reg, index_reg);
aoqi@0 2929 cmpd(CCR0, tmp1_reg, tmp2_reg);
aoqi@0 2930 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
aoqi@0 2931 addi(index_reg, index_reg, 4*sizeof(jchar));
aoqi@0 2932 bdnz(Lloop);
aoqi@0 2933 //14:
aoqi@0 2934 bind(Linit_cbc);
aoqi@0 2935 beq(CCR1, Ldone_true);
aoqi@0 2936 mtctr(cbc_iter);
aoqi@0 2937 //16:
aoqi@0 2938 bind(Lcbc);
aoqi@0 2939 lhzx(tmp1_reg, str1_reg, index_reg);
aoqi@0 2940 lhzx(tmp2_reg, str2_reg, index_reg);
aoqi@0 2941 cmpw(CCR0, tmp1_reg, tmp2_reg);
aoqi@0 2942 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
aoqi@0 2943 addi(index_reg, index_reg, 1*sizeof(jchar));
aoqi@0 2944 bdnz(Lcbc);
aoqi@0 2945 nop();
aoqi@0 2946 bind(Ldone_true);
aoqi@0 2947 li(result_reg, 1);
aoqi@0 2948 //24:
aoqi@0 2949 bind(Ldone_false);
aoqi@0 2950 }
aoqi@0 2951
aoqi@0 2952
aoqi@0 2953 void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
aoqi@0 2954 Register tmp1_reg, Register tmp2_reg) {
aoqi@0 2955 // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
aoqi@0 2956 assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
aoqi@0 2957 assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
aoqi@0 2958 assert(sizeof(jchar) == 2, "must be");
aoqi@0 2959 assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
aoqi@0 2960
aoqi@0 2961 Label Ldone_false;
aoqi@0 2962
aoqi@0 2963 if (cntval < 16) { // short case
aoqi@0 2964 if (cntval != 0) li(result_reg, 0); // assume false
aoqi@0 2965
aoqi@0 2966 const int num_bytes = cntval*sizeof(jchar);
aoqi@0 2967 int index = 0;
aoqi@0 2968 for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
aoqi@0 2969 ld(tmp1_reg, index, str1_reg);
aoqi@0 2970 ld(tmp2_reg, index, str2_reg);
aoqi@0 2971 cmpd(CCR0, tmp1_reg, tmp2_reg);
aoqi@0 2972 bne(CCR0, Ldone_false);
aoqi@0 2973 }
aoqi@0 2974 if (cntval & 2) {
aoqi@0 2975 lwz(tmp1_reg, index, str1_reg);
aoqi@0 2976 lwz(tmp2_reg, index, str2_reg);
aoqi@0 2977 cmpw(CCR0, tmp1_reg, tmp2_reg);
aoqi@0 2978 bne(CCR0, Ldone_false);
aoqi@0 2979 index += 4;
aoqi@0 2980 }
aoqi@0 2981 if (cntval & 1) {
aoqi@0 2982 lhz(tmp1_reg, index, str1_reg);
aoqi@0 2983 lhz(tmp2_reg, index, str2_reg);
aoqi@0 2984 cmpw(CCR0, tmp1_reg, tmp2_reg);
aoqi@0 2985 bne(CCR0, Ldone_false);
aoqi@0 2986 }
aoqi@0 2987 // fallthrough: true
aoqi@0 2988 } else {
aoqi@0 2989 Label Lloop;
aoqi@0 2990 Register index_reg = tmp1_reg;
aoqi@0 2991 const int loopcnt = cntval/4;
aoqi@0 2992 assert(loopcnt > 0, "must be");
aoqi@0 2993 // Offset 0 should be 32 byte aligned.
aoqi@0 2994 //2:
aoqi@0 2995 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
aoqi@0 2996 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
aoqi@0 2997 li(tmp2_reg, loopcnt);
aoqi@0 2998 li(index_reg, 0); // init
aoqi@0 2999 li(result_reg, 0); // assume false
aoqi@0 3000 mtctr(tmp2_reg);
aoqi@0 3001 //8:
aoqi@0 3002 bind(Lloop);
aoqi@0 3003 ldx(R0, str1_reg, index_reg);
aoqi@0 3004 ldx(tmp2_reg, str2_reg, index_reg);
aoqi@0 3005 cmpd(CCR0, R0, tmp2_reg);
aoqi@0 3006 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
aoqi@0 3007 addi(index_reg, index_reg, 4*sizeof(jchar));
aoqi@0 3008 bdnz(Lloop);
aoqi@0 3009 //14:
aoqi@0 3010 if (cntval & 2) {
aoqi@0 3011 lwzx(R0, str1_reg, index_reg);
aoqi@0 3012 lwzx(tmp2_reg, str2_reg, index_reg);
aoqi@0 3013 cmpw(CCR0, R0, tmp2_reg);
aoqi@0 3014 bne(CCR0, Ldone_false);
aoqi@0 3015 if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
aoqi@0 3016 }
aoqi@0 3017 if (cntval & 1) {
aoqi@0 3018 lhzx(R0, str1_reg, index_reg);
aoqi@0 3019 lhzx(tmp2_reg, str2_reg, index_reg);
aoqi@0 3020 cmpw(CCR0, R0, tmp2_reg);
aoqi@0 3021 bne(CCR0, Ldone_false);
aoqi@0 3022 }
aoqi@0 3023 // fallthrough: true
aoqi@0 3024 }
aoqi@0 3025 li(result_reg, 1);
aoqi@0 3026 bind(Ldone_false);
aoqi@0 3027 }
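// Reference semantics of both equality variants above (sketch): result is 1 if
// the first cnt characters of the two arrays are equal, otherwise 0.
//
//   static int char_arrays_equals_ref(const jchar* s1, const jchar* s2, int cnt) {
//     for (int i = 0; i < cnt; i++) {
//       if (s1[i] != s2[i]) return 0;
//     }
//     return 1;
//   }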
aoqi@0 3028
aoqi@0 3029
aoqi@0 3030 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
aoqi@0 3031 #ifdef ASSERT
aoqi@0 3032 Label ok;
aoqi@0 3033 if (check_equal) {
aoqi@0 3034 beq(CCR0, ok);
aoqi@0 3035 } else {
aoqi@0 3036 bne(CCR0, ok);
aoqi@0 3037 }
aoqi@0 3038 stop(msg, id);
aoqi@0 3039 bind(ok);
aoqi@0 3040 #endif
aoqi@0 3041 }
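// Usage sketch (illustrative; register, message and id are placeholders): the
// caller sets CCR0 with a compare, then asserts on the result. The check only
// emits code in ASSERT builds.
//
//   cmpdi(CCR0, R3_ARG1, 0);
//   asm_assert(false /*check_equal*/, "argument must not be zero", 0x123);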
aoqi@0 3042
aoqi@0 3043 void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
aoqi@0 3044 Register mem_base, const char* msg, int id) {
aoqi@0 3045 #ifdef ASSERT
aoqi@0 3046 switch (size) {
aoqi@0 3047 case 4:
aoqi@0 3048 lwz(R0, mem_offset, mem_base);
aoqi@0 3049 cmpwi(CCR0, R0, 0);
aoqi@0 3050 break;
aoqi@0 3051 case 8:
aoqi@0 3052 ld(R0, mem_offset, mem_base);
aoqi@0 3053 cmpdi(CCR0, R0, 0);
aoqi@0 3054 break;
aoqi@0 3055 default:
aoqi@0 3056 ShouldNotReachHere();
aoqi@0 3057 }
aoqi@0 3058 asm_assert(check_equal, msg, id);
aoqi@0 3059 #endif // ASSERT
aoqi@0 3060 }
aoqi@0 3061
aoqi@0 3062 void MacroAssembler::verify_thread() {
aoqi@0 3063 if (VerifyThread) {
aoqi@0 3064 unimplemented("'VerifyThread' currently not implemented on PPC");
aoqi@0 3065 }
aoqi@0 3066 }
aoqi@0 3067
aoqi@0 3068 // READ: oop. KILL: R0. May also kill volatile float registers.
aoqi@0 3069 void MacroAssembler::verify_oop(Register oop, const char* msg) {
aoqi@0 3070 if (!VerifyOops) {
aoqi@0 3071 return;
aoqi@0 3072 }
goetz@7424 3073
aoqi@0 3074 address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
goetz@7424 3075 const Register tmp = R11; // Will be preserved.
goetz@7424 3076 const int nbytes_save = 11*8; // Volatile gprs except R0.
goetz@7424 3077 save_volatile_gprs(R1_SP, -nbytes_save); // except R0
goetz@7424 3078
goetz@7424 3079 if (oop == tmp) mr(R4_ARG2, oop);
goetz@7424 3080 save_LR_CR(tmp); // save in old frame
aoqi@0 3081 push_frame_reg_args(nbytes_save, tmp);
aoqi@0 3082 // load FunctionDescriptor** / entry_address *
goetz@7424 3083 load_const_optimized(tmp, fd, R0);
aoqi@0 3084 // load FunctionDescriptor* / entry_address
aoqi@0 3085 ld(tmp, 0, tmp);
goetz@7424 3086 if (oop != tmp) mr_if_needed(R4_ARG2, oop);
goetz@7424 3087 load_const_optimized(R3_ARG1, (address)msg, R0);
goetz@7424 3088 // Call destination for its side effect.
aoqi@0 3089 call_c(tmp);
goetz@7424 3090
aoqi@0 3091 pop_frame();
aoqi@0 3092 restore_LR_CR(tmp);
goetz@7424 3093 restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
aoqi@0 3094 }
aoqi@0 3095
aoqi@0 3096 const char* stop_types[] = {
aoqi@0 3097 "stop",
aoqi@0 3098 "untested",
aoqi@0 3099 "unimplemented",
aoqi@0 3100 "shouldnotreachhere"
aoqi@0 3101 };
aoqi@0 3102
aoqi@0 3103 static void stop_on_request(int tp, const char* msg) {
coleenp@7358 3104 tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
aoqi@0 3105 guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
aoqi@0 3106 }
aoqi@0 3107
aoqi@0 3108 // Call a C-function that prints output.
aoqi@0 3109 void MacroAssembler::stop(int type, const char* msg, int id) {
aoqi@0 3110 #ifndef PRODUCT
aoqi@0 3111 block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
aoqi@0 3112 #else
aoqi@0 3113 block_comment("stop {");
aoqi@0 3114 #endif
aoqi@0 3115
aoqi@0 3116 // setup arguments
aoqi@0 3117 load_const_optimized(R3_ARG1, type);
aoqi@0 3118 load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
aoqi@0 3119 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
aoqi@0 3120 illtrap();
aoqi@0 3121 emit_int32(id);
aoqi@0 3122 block_comment("} stop;");
aoqi@0 3123 }
aoqi@0 3124
aoqi@0 3125 #ifndef PRODUCT
aoqi@0 3126 // Write pattern 0x0101010101010101 in memory region [low-before, high+after].
aoqi@0 3127 // Val, addr are temp registers.
aoqi@0 3128 // If low == addr, addr is killed.
aoqi@0 3129 // High is preserved.
aoqi@0 3130 void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
aoqi@0 3131 if (!ZapMemory) return;
aoqi@0 3132
aoqi@0 3133 assert_different_registers(low, val);
aoqi@0 3134
aoqi@0 3135 BLOCK_COMMENT("zap memory region {");
aoqi@0 3136 load_const_optimized(val, 0x0101010101010101);
aoqi@0 3137 int size = before + after;
aoqi@0 3138 if (low == high && size < 5 && size > 0) {
aoqi@0 3139 int offset = -before*BytesPerWord;
aoqi@0 3140 for (int i = 0; i < size; ++i) {
aoqi@0 3141 std(val, offset, low);
aoqi@0 3142 offset += (1*BytesPerWord);
aoqi@0 3143 }
aoqi@0 3144 } else {
aoqi@0 3145 addi(addr, low, -before*BytesPerWord);
aoqi@0 3146 assert_different_registers(high, val);
aoqi@0 3147 if (after) addi(high, high, after * BytesPerWord);
aoqi@0 3148 Label loop;
aoqi@0 3149 bind(loop);
aoqi@0 3150 std(val, 0, addr);
aoqi@0 3151 addi(addr, addr, 8);
aoqi@0 3152 cmpd(CCR6, addr, high);
aoqi@0 3153 ble(CCR6, loop);
aoqi@0 3154 if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value.
aoqi@0 3155 }
aoqi@0 3156 BLOCK_COMMENT("} zap memory region");
aoqi@0 3157 }
aoqi@0 3158
aoqi@0 3159 #endif // !PRODUCT
aoqi@0 3160
aoqi@0 3161 SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
aoqi@0 3162 int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
aoqi@0 3163 assert(sizeof(bool) == 1, "PowerPC ABI");
aoqi@0 3164 masm->lbz(temp, simm16_offset, temp);
aoqi@0 3165 masm->cmpwi(CCR0, temp, 0);
aoqi@0 3166 masm->beq(CCR0, _label);
aoqi@0 3167 }
aoqi@0 3168
aoqi@0 3169 SkipIfEqualZero::~SkipIfEqualZero() {
aoqi@0 3170 _masm->bind(_label);
aoqi@0 3171 }
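// Usage sketch (illustrative; masm, the temp register and the flag are
// placeholders for the caller's own values): the RAII object emits a branch over
// the code generated in its scope when the byte flag is zero, and binds the skip
// target in its destructor.
//
//   {
//     SkipIfEqualZero skip_if(masm, R11_scratch1, &DTraceMethodProbes);
//     // ... code emitted here is executed only if DTraceMethodProbes != 0 ...
//   }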
