src/cpu/ppc/vm/macroAssembler_ppc.cpp

changeset:   9669:32bc598624bd
author:      phh
date:        Tue, 07 May 2019 20:38:26 +0000
parent:      9603:6ce4101edc7a
child:       9703:2fdf635bcf28
permissions: -rw-r--r--

8176100: [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
Summary: Add tag bit to all JNI weak handles
Reviewed-by: kbarrett, coleenp, tschatzl

/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

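// Load a doubleword from a 31-bit offset. If the offset does not fit into a
// simm16 field, split it into an addis/ld pair; otherwise emit a single ld,
// optionally followed by a filler nop so both variants occupy two instructions.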
void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
  if (Assembler::is_simm(si31, 16)) {
    ld(d, si31, a);
    if (emit_filler_nop) nop();
  } else {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    addis(d, a, hi);
    ld(d, lo, d);
  }
}

void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
  assert_different_registers(d, a);
  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
}

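// Load a value of 1, 2, 4 or 8 bytes, zero-extending it by default and
// sign-extending it if is_signed is set.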
void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case 8: ld(dst, offs, base); break;
  case 4: is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
  case 2: is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
  case 1: lbz(dst, offs, base); if (is_signed) extsb(dst, dst); break; // lba doesn't exist :(
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                       size_t size_in_bytes) {
  switch (size_in_bytes) {
  case 8: std(dst, offs, base); break;
  case 4: stw(dst, offs, base); break;
  case 2: sth(dst, offs, base); break;
  case 1: stb(dst, offs, base); break;
  default: ShouldNotReachHere();
  }
}

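// Pad with nops until offset() % modulus == rem, but give up (emit nothing)
// if more than max bytes of padding would be required.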
void MacroAssembler::align(int modulus, int max, int rem) {
  int padding = (rem + modulus - (offset() % modulus)) % modulus;
  if (padding > max) return;
  for (int c = (padding >> 2); c > 0; --c) { nop(); }
}

// Issue instructions that calculate given TOC from global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
                                                       bool add_relocation, bool emit_dummy_addr) {
  int offset = -1;
  if (emit_dummy_addr) {
    offset = -128; // dummy address
  } else if (addr != (address)(intptr_t)-1) {
    offset = MacroAssembler::offset_to_global_toc(addr);
  }

  if (hi16) {
    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
  }
  if (lo16) {
    if (add_relocation) {
      // Relocate at the addi to avoid confusion with a load from the method's TOC.
      relocate(internal_word_Relocation::spec(addr));
    }
    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
  }
}

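// Patch a sequence emitted by calculate_address_from_global_toc (an addis
// writing dst followed, possibly at a distance, by an addi reading and
// writing dst) so that it computes addr. Returns the offset of addr relative
// to the patched addis.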
int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
  const int offset = MacroAssembler::offset_to_global_toc(addr);

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
}

address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");

  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
  // -1 is a special case
  if (offset == -1) {
    return (address)(intptr_t)-1;
  } else {
    return global_toc() + offset;
  }
}

#ifdef _LP64
// Patch compressed oops or klass constants.
// Assembler sequence is
// 1) compressed oops:
//    lis  rx = const.hi
//    ori  rx = rx | const.lo
// 2) compressed klass:
//    lis  rx = const.hi
//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
//    ori  rx = rx | const.lo
// The clrldi, if present, is skipped over by the patching code below.
int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;
  while (inst1_addr >= bound) {
    inst1 = *(int *)inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  int xc = (data >> 16) & 0xffff;
  int xd = (data >> 0) & 0xffff;

  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
  set_imm((int *)inst2_addr, (xd)); // unsigned int
  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
}

// Get compressed oop or klass constant.
narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;

  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;}
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);

  return (int) (xl | xh);
}
#endif // _LP64

void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
  int toc_offset = 0;
  // Use RelocationHolder::none for the constant pool entry, otherwise
  // we will end up with a failing NativeCall::verify(x) where x is
  // the address of the constant pool entry.
  // FIXME: We should insert relocation information for oops at the constant
  // pool entries instead of inserting it at the loads; patching of a constant
  // pool entry should be less expensive.
  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
  // Relocate at the pc of the load.
  relocate(a.rspec());
  toc_offset = (int)(oop_address - code()->consts()->start());
  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
}

bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  // The relocation points to the ld or the addis.
  return (is_ld(inst1)) ||
         (is_addis(inst1) && inv_ra_field(inst1) != 0);
}

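// Extract the TOC offset from a load_const_from_method_toc sequence, which is
// either a single ld or an addis followed by an ld reading and writing the
// same register.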
int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");

  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else if (is_addis(inst1)) {
    const int dst = inv_rt_field(inst1);

    // Now, find the succeeding ld which reads and writes to dst.
    address inst2_addr = inst1_addr + BytesPerInstWord;
    int inst2 = 0;
    while (true) {
      inst2 = *(int *) inst2_addr;
      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
        // Stop, found the ld which reads and writes dst.
        break;
      }
      inst2_addr += BytesPerInstWord;
    }
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
  ShouldNotReachHere();
  return 0;
}

// Get the constant from a `load_const' sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const_at(a), "not a load of a constant");
  const int *p = (const int*) a;
  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
  if (is_ori(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
  } else if (is_lis(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
  } else {
    ShouldNotReachHere();
    return (long) 0;
  }
  return (long) x;
}

// Patch the 64 bit constant of a `load_const' sequence. This is a low
// level procedure. It neither flushes the instruction cache nor is it
// mt safe.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const_at(a), "not a load of a constant");
  int *p = (int*) a;
  if (is_ori(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(1 + p, (x >> 32) & 0xffff);
    set_imm(3 + p, (x >> 16) & 0xffff);
    set_imm(4 + p, x & 0xffff);
  } else if (is_lis(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(2 + p, (x >> 32) & 0xffff);
    set_imm(1 + p, (x >> 16) & 0xffff);
    set_imm(3 + p, x & 0xffff);
  } else {
    ShouldNotReachHere();
  }
}

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp, int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  // Load indirectly to solve generation ordering problem.
  // static address, no relocation
  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)

  if (offset != 0) {
    addi(tmp, tmp, offset);
  }

  return RegisterOrConstant(tmp);
}

#ifndef PRODUCT
void MacroAssembler::pd_print_patched_instruction(address branch) {
  Unimplemented(); // TODO: PPC port
}
#endif // ndef PRODUCT

// Conditional far branch for destinations encodable in 24+2 bits.
void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {

  // If requested by flag optimize, relocate the bc_far as a
  // runtime_call and prepare for optimizing it when the code gets
  // relocated.
  if (optimize == bc_far_optimize_on_relocate) {
    relocate(relocInfo::runtime_call_type);
  }

  // variant 2:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //

  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                opposite_bcond(inv_boint_bcond(boint)));

  // We emit two branches.
  // First, a conditional branch which jumps around the far branch.
  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
  const address bc_pc        = pc();
  bc(opposite_boint, biint, not_taken_pc);

  const int bc_instr = *(int*)bc_pc;
  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
         "postcondition");
  assert(biint == inv_bi_field(bc_instr), "postcondition");

  // Second, an unconditional far branch which jumps to dest.
  // Note: target(dest) remembers the current pc (see CodeSection::target)
  // and returns the current pc if the label is not bound yet; when
  // the label gets bound, the unconditional far branch will be patched.
  const address target_pc = target(dest);
  const address b_pc = pc();
  b(target_pc);

  assert(not_taken_pc == pc(), "postcondition");
  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
}

bool MacroAssembler::is_bc_far_at(address instruction_addr) {
  return is_bc_far_variant1_at(instruction_addr) ||
         is_bc_far_variant2_at(instruction_addr) ||
         is_bc_far_variant3_at(instruction_addr);
}

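// Return the destination of a bc_far sequence, decoding whichever of the
// three variants was emitted at instruction_addr.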
address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
  if (is_bc_far_variant1_at(instruction_addr)) {
    const address instruction_1_addr = instruction_addr;
    const int instruction_1 = *(int*)instruction_1_addr;
    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
  } else if (is_bc_far_variant2_at(instruction_addr)) {
    const address instruction_2_addr = instruction_addr + 4;
    return bxx_destination(instruction_2_addr);
  } else if (is_bc_far_variant3_at(instruction_addr)) {
    return instruction_addr + 8;
  }
  // variant 4 ???
  ShouldNotReachHere();
  return NULL;
}
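
// Retarget an emitted bc_far sequence at instruction_addr to dest,
// re-emitting the most suitable variant: nops for a branch to the next
// instruction, a single bcxx when dest is in range, else the two-branch form.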
void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {

  if (is_bc_far_variant3_at(instruction_addr)) {
    // variant 3, far cond branch to the next instruction, already patched to nops:
    //
    //    nop
    //    endgroup
    //  SKIP/DEST:
    //
    return;
  }

  // first, extract boint and biint from the current branch
  int boint = 0;
  int biint = 0;

  ResourceMark rm;
  const int code_size = 2 * BytesPerInstWord;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
    masm.nop();
    masm.endgroup();
  } else {
    if (is_bc_far_variant1_at(instruction_addr)) {
      // variant 1, the 1st instruction contains the destination address:
      //
      //    bcxx  DEST
      //    endgroup
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = inv_bo_field(instruction_1);
      biint = inv_bi_field(instruction_1);
    } else if (is_bc_far_variant2_at(instruction_addr)) {
      // variant 2, the 2nd instruction contains the destination address:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
                                 opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
      biint = inv_bi_field(instruction_1);
    } else {
      // variant 4???
      ShouldNotReachHere();
    }

    // second, set the new branch destination and optimize the code
    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
      // variant 1:
      //
      //    bcxx  DEST
      //    endgroup
      //
      masm.bc(boint, biint, dest);
      masm.endgroup();
    } else {
      // variant 2:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                    opposite_bcond(inv_boint_bcond(boint)));
      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
      masm.bc(opposite_boint, biint, not_taken_pc);
      masm.b(dest);
    }
  }
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Emit a NOT mt-safe patchable 64 bit absolute call/jump.
void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
  // get current pc
  uint64_t start_pc = (uint64_t) pc();

  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first

  // relocate here
  if (rt != relocInfo::none) {
    relocate(rt);
  }

  if ( ReoptimizeCallSequences &&
       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
        (!link && is_within_range_of_b(dest, pc_of_b)))) {
    // variant 2:
    // Emit an optimized, pc-relative call/jump.

    if (link) {
      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();

      // do the call
      assert(pc() == pc_of_bl, "just checking");
      bl(dest, relocInfo::none);
    } else {
      // do the jump
      assert(pc() == pc_of_b, "just checking");
      b(dest, relocInfo::none);

      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();
    }

    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
           "can't identify emitted call");
  } else {
    // variant 1:
    mr(R0, R11); // spill R11 -> R0.

    // Load the destination address into CTR,
    // calculate destination relative to global toc.
    calculate_address_from_global_toc(R11, dest, true, true, false);

    mtctr(R11);
    mr(R11, R0); // spill R11 <- R0.
    nop();

    // do the call/jump
    if (link) {
      bctrl();
    } else {
      bctr();
    }
    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
           "can't identify emitted call");
  }

  // Assert that we can identify the emitted call/jump.
  assert(is_bxx64_patchable_at((address)start_pc, link),
         "can't identify emitted call");
  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
         "wrong encoding of dest address");
}

// Identify a bxx64_patchable instruction.
bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
      || is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Does the call64_patchable instruction use a pc-relative encoding of
// the call destination?
bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
  // variant 2 is pc-relative
  return is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Identify variant 1.
bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[5]) // mtctr
      && is_load_const_at(instruction_addr);
}

// Identify variant 1b: load destination relative to global toc.
bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[3]) // mtctr
      && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
}

// Identify variant 2.
bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  if (link) {
    return is_bl (instr[6])  // bl dest is last
        && is_nop(instr[0])  // nop
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5]); // nop
  } else {
    return is_b  (instr[0])  // b dest is first
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5])  // nop
        && is_nop(instr[6]); // nop
  }
}

// Set dest address of a bxx64_patchable instruction.
void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
  ResourceMark rm;
  int code_size = MacroAssembler::bxx64_patchable_size;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.bxx64_patchable(dest, relocInfo::none, link);
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Get dest address of a bxx64_patchable instruction.
address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
    return (address) (unsigned long) get_const(instruction_addr);
  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
    unsigned int* instr = (unsigned int*) instruction_addr;
    if (link) {
      const int instr_idx = 6; // bl is last
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    } else {
      const int instr_idx = 0; // b is first
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    }
  // Load dest relative to global toc.
  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
                                                               instruction_addr);
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// Uses ordering which corresponds to ABI:
//    _savegpr0_14: std r14,-144(r1)
//    _savegpr0_15: std r15,-136(r1)
//    _savegpr0_16: std r16,-128(r1)
void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
  std(R14, offset, dst);  offset += 8;
  std(R15, offset, dst);  offset += 8;
  std(R16, offset, dst);  offset += 8;
  std(R17, offset, dst);  offset += 8;
  std(R18, offset, dst);  offset += 8;
  std(R19, offset, dst);  offset += 8;
  std(R20, offset, dst);  offset += 8;
  std(R21, offset, dst);  offset += 8;
  std(R22, offset, dst);  offset += 8;
  std(R23, offset, dst);  offset += 8;
  std(R24, offset, dst);  offset += 8;
  std(R25, offset, dst);  offset += 8;
  std(R26, offset, dst);  offset += 8;
  std(R27, offset, dst);  offset += 8;
  std(R28, offset, dst);  offset += 8;
  std(R29, offset, dst);  offset += 8;
  std(R30, offset, dst);  offset += 8;
  std(R31, offset, dst);  offset += 8;

  stfd(F14, offset, dst); offset += 8;
  stfd(F15, offset, dst); offset += 8;
  stfd(F16, offset, dst); offset += 8;
  stfd(F17, offset, dst); offset += 8;
  stfd(F18, offset, dst); offset += 8;
  stfd(F19, offset, dst); offset += 8;
  stfd(F20, offset, dst); offset += 8;
  stfd(F21, offset, dst); offset += 8;
  stfd(F22, offset, dst); offset += 8;
  stfd(F23, offset, dst); offset += 8;
  stfd(F24, offset, dst); offset += 8;
  stfd(F25, offset, dst); offset += 8;
  stfd(F26, offset, dst); offset += 8;
  stfd(F27, offset, dst); offset += 8;
  stfd(F28, offset, dst); offset += 8;
  stfd(F29, offset, dst); offset += 8;
  stfd(F30, offset, dst); offset += 8;
  stfd(F31, offset, dst);
}

// Uses ordering which corresponds to ABI:
//    _restgpr0_14: ld r14,-144(r1)
//    _restgpr0_15: ld r15,-136(r1)
//    _restgpr0_16: ld r16,-128(r1)
void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
  ld(R14, offset, src);  offset += 8;
  ld(R15, offset, src);  offset += 8;
  ld(R16, offset, src);  offset += 8;
  ld(R17, offset, src);  offset += 8;
  ld(R18, offset, src);  offset += 8;
  ld(R19, offset, src);  offset += 8;
  ld(R20, offset, src);  offset += 8;
  ld(R21, offset, src);  offset += 8;
  ld(R22, offset, src);  offset += 8;
  ld(R23, offset, src);  offset += 8;
  ld(R24, offset, src);  offset += 8;
  ld(R25, offset, src);  offset += 8;
  ld(R26, offset, src);  offset += 8;
  ld(R27, offset, src);  offset += 8;
  ld(R28, offset, src);  offset += 8;
  ld(R29, offset, src);  offset += 8;
  ld(R30, offset, src);  offset += 8;
  ld(R31, offset, src);  offset += 8;

  // FP registers
  lfd(F14, offset, src); offset += 8;
  lfd(F15, offset, src); offset += 8;
  lfd(F16, offset, src); offset += 8;
  lfd(F17, offset, src); offset += 8;
  lfd(F18, offset, src); offset += 8;
  lfd(F19, offset, src); offset += 8;
  lfd(F20, offset, src); offset += 8;
  lfd(F21, offset, src); offset += 8;
  lfd(F22, offset, src); offset += 8;
  lfd(F23, offset, src); offset += 8;
  lfd(F24, offset, src); offset += 8;
  lfd(F25, offset, src); offset += 8;
  lfd(F26, offset, src); offset += 8;
  lfd(F27, offset, src); offset += 8;
  lfd(F28, offset, src); offset += 8;
  lfd(F29, offset, src); offset += 8;
  lfd(F30, offset, src); offset += 8;
  lfd(F31, offset, src);
}

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
  std(R2,  offset, dst); offset += 8;
  std(R3,  offset, dst); offset += 8;
  std(R4,  offset, dst); offset += 8;
  std(R5,  offset, dst); offset += 8;
  std(R6,  offset, dst); offset += 8;
  std(R7,  offset, dst); offset += 8;
  std(R8,  offset, dst); offset += 8;
  std(R9,  offset, dst); offset += 8;
  std(R10, offset, dst); offset += 8;
  std(R11, offset, dst); offset += 8;
  std(R12, offset, dst);
}

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
  ld(R2,  offset, src); offset += 8;
  ld(R3,  offset, src); offset += 8;
  ld(R4,  offset, src); offset += 8;
  ld(R5,  offset, src); offset += 8;
  ld(R6,  offset, src); offset += 8;
  ld(R7,  offset, src); offset += 8;
  ld(R8,  offset, src); offset += 8;
  ld(R9,  offset, src); offset += 8;
  ld(R10, offset, src); offset += 8;
  ld(R11, offset, src); offset += 8;
  ld(R12, offset, src);
}

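// Spill CR and LR into their save slots in the ABI area of the frame pointed
// to by R1_SP.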
void MacroAssembler::save_LR_CR(Register tmp) {
  mfcr(tmp);
  std(tmp, _abi(cr), R1_SP);
  mflr(tmp);
  std(tmp, _abi(lr), R1_SP);
  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
}

void MacroAssembler::restore_LR_CR(Register tmp) {
  assert(tmp != R1_SP, "must be distinct");
  ld(tmp, _abi(lr), R1_SP);
  mtlr(tmp);
  ld(tmp, _abi(cr), R1_SP);
  mtcr(tmp);
}

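// Determine the current PC by executing a bl to the next instruction and
// reading it back from LR. Trashes LR; returns the PC, which is also left
// in result.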
address MacroAssembler::get_PC_trash_LR(Register result) {
  Label L;
  bl(L);
  bind(L);
  address lr_pc = pc();
  mflr(result);
  return lr_pc;
}

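// Resize the current frame by `offset' bytes: the back link is reloaded and
// re-stored with a single stdux, so SP and the back chain are updated
// atomically.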
void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
  assert_different_registers(offset, tmp, R1_SP);
  andi_(tmp, offset, frame::alignment_in_bytes-1);
  asm_assert_eq("resize_frame: unaligned", 0x204);
#endif

  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdux(tmp, R1_SP, offset);
}

void MacroAssembler::resize_frame(int offset, Register tmp) {
  assert(is_simm(offset, 16), "too big an offset");
  assert_different_registers(tmp, R1_SP);
  assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdu(tmp, offset, R1_SP);
}

void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
  // (addr == tmp1) || (addr == tmp2) is allowed here!
  assert(tmp1 != tmp2, "must be distinct");

  // compute offset w.r.t. current stack pointer
  // tmp_1 <- addr - SP (!)
  subf(tmp1, R1_SP, addr);

  // atomically update SP keeping back link.
  resize_frame(tmp1/* offset */, tmp2/* tmp */);
}

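// Push a frame of variable size `bytes'. The stdux stores the old SP while
// decrementing R1_SP, so the back chain stays intact at all times.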
void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
  assert(bytes != R0, "r0 not allowed here");
  andi_(R0, bytes, frame::alignment_in_bytes-1);
  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
#endif
  neg(tmp, bytes);
  stdux(R1_SP, R1_SP, tmp);
}

// Push a frame of size `bytes'.
void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
  long offset = align_addr(bytes, frame::alignment_in_bytes);
  if (is_simm(-offset, 16)) {
    stdu(R1_SP, -offset, R1_SP);
  } else {
    load_const(tmp, -offset);
    stdux(R1_SP, R1_SP, tmp);
  }
}

// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
                                                      Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  ld(R1_SP, _abi(callers_sp), R1_SP);
}

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
  // TODO(asmundak): make sure the caller uses R12 as function descriptor
  // most of the times.
  if (R12 != r_function_entry) {
    mr(R12, r_function_entry);
  }
  mtctr(R12);
  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
  load_const(R12, function_entry, R0);
  return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
  // we emit standard ptrgl glue code here
  assert((function_descriptor != R0), "function_descriptor cannot be R0");

  // retrieve necessary entries from the function descriptor
  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
  mtctr(R0);

  if (load_toc_of_callee) {
    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
  }
  if (load_env_of_callee) {
    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
  } else if (load_toc_of_callee) {
    li(R11, 0);
  }

  // do a call or a branch
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C calling
// conventions.
// We don't use the TOC in generated code, so there is no need to save
// and restore its value.
address MacroAssembler::call_c(Register fd) {
  return branch_to(fd, /*and_link=*/true,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c_and_return_to_caller(Register fd) {
  return branch_to(fd, /*and_link=*/false,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
  if (rt != relocInfo::none) {
    // this call needs to be relocatable
    if (!ReoptimizeCallSequences
        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
        || fd == NULL // support code-size estimation
        || !fd->is_friend_function()
        || fd->entry() == NULL) {
      // it's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);

      bool has_env = (fd != NULL && fd->env() != NULL);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/has_env);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env. Use an optimizable call instruction, but ensure the
      // same code-size as in the case of a non-friend function.
      nop();
      nop();
      nop();
      bl64_patchable(fd->entry(), rt);
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  } else {
    // This call does not need to be relocatable, do more aggressive
    // optimizations.
    if (!ReoptimizeCallSequences
        || !fd->is_friend_function()) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/true);
    } else {
      // it's a friend function, load the entry point and don't care about
      // toc and env.
      address dest = fd->entry();
      if (is_within_range_of_b(dest, pc())) {
        bl(dest);
      } else {
        bl64_patchable(dest, rt);
      }
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  }
}

// Call a C function. All constants needed reside in TOC.
//
// Read the address to call from the TOC.
// Read env from TOC, if fd specifies an env.
// Read new TOC from TOC.
address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
                                         relocInfo::relocType rt, Register toc) {
  if (!ReoptimizeCallSequences
      || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
      || !fd->is_friend_function()) {
    // It's not a friend function as defined by class FunctionDescriptor,
    // so do a full call-c here.
    assert(fd->entry() != NULL, "function must be linked");

    AddressLiteral fd_entry(fd->entry());
    load_const_from_method_toc(R11, fd_entry, toc);
    mtctr(R11);
    if (fd->env() == NULL) {
      li(R11, 0);
      nop();
    } else {
      AddressLiteral fd_env(fd->env());
      load_const_from_method_toc(R11, fd_env, toc);
    }
    AddressLiteral fd_toc(fd->toc());
    load_toc_from_toc(R2_TOC, fd_toc, toc);
    // R2_TOC is killed.
    bctrl();
    _last_calls_return_pc = pc();
  } else {
    // It's a friend function, load the entry point and don't care about
    // toc and env. Use an optimizable call instruction, but ensure the
    // same code-size as in the case of a non-friend function.
    nop();
    bl64_patchable(fd->entry(), rt);
    _last_calls_return_pc = pc();
  }
  return _last_calls_return_pc;
}
#endif // ABI_ELFv2

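// Call a VM entry point. R16_thread is passed as the implicit first argument;
// the last Java frame is set up before the call and reset afterwards.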
goetz@6458 1118 void MacroAssembler::call_VM_base(Register oop_result,
goetz@6458 1119 Register last_java_sp,
goetz@6458 1120 address entry_point,
goetz@6458 1121 bool check_exceptions) {
goetz@6458 1122 BLOCK_COMMENT("call_VM {");
goetz@6458 1123 // Determine last_java_sp register.
goetz@6458 1124 if (!last_java_sp->is_valid()) {
goetz@6458 1125 last_java_sp = R1_SP;
goetz@6458 1126 }
goetz@6458 1127 set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);
goetz@6458 1128
goetz@6458 1129 // ARG1 must hold thread address.
goetz@6458 1130 mr(R3_ARG1, R16_thread);
goetz@6511 1131 #if defined(ABI_ELFv2)
goetz@6511 1132 address return_pc = call_c(entry_point, relocInfo::none);
goetz@6511 1133 #else
goetz@6458 1134 address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
goetz@6511 1135 #endif
goetz@6458 1136
goetz@6458 1137 reset_last_Java_frame();
goetz@6458 1138
goetz@6458 1139 // Check for pending exceptions.
goetz@6458 1140 if (check_exceptions) {
goetz@6458 1141 // We don't check for exceptions here.
goetz@6458 1142 ShouldNotReachHere();
goetz@6458 1143 }
goetz@6458 1144
goetz@6458 1145 // Get oop result if there is one and reset the value in the thread.
goetz@6458 1146 if (oop_result->is_valid()) {
goetz@6458 1147 get_vm_result(oop_result);
goetz@6458 1148 }
goetz@6458 1149
goetz@6458 1150 _last_calls_return_pc = return_pc;
goetz@6458 1151 BLOCK_COMMENT("} call_VM");
goetz@6458 1152 }
goetz@6458 1153
goetz@6458 1154 void MacroAssembler::call_VM_leaf_base(address entry_point) {
goetz@6458 1155 BLOCK_COMMENT("call_VM_leaf {");
goetz@6511 1156 #if defined(ABI_ELFv2)
goetz@6511 1157 call_c(entry_point, relocInfo::none);
goetz@6511 1158 #else
goetz@6458 1159 call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
goetz@6511 1160 #endif
goetz@6458 1161 BLOCK_COMMENT("} call_VM_leaf");
goetz@6458 1162 }
goetz@6458 1163
goetz@6458 1164 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
goetz@6458 1165 call_VM_base(oop_result, noreg, entry_point, check_exceptions);
goetz@6458 1166 }
goetz@6458 1167
goetz@6458 1168 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
goetz@6458 1169 bool check_exceptions) {
goetz@6458 1170 // R3_ARG1 is reserved for the thread.
goetz@6458 1171 mr_if_needed(R4_ARG2, arg_1);
goetz@6458 1172 call_VM(oop_result, entry_point, check_exceptions);
goetz@6458 1173 }
goetz@6458 1174
goetz@6458 1175 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
goetz@6458 1176 bool check_exceptions) {
goetz@6458 1177 // R3_ARG1 is reserved for the thread
goetz@6458 1178 mr_if_needed(R4_ARG2, arg_1);
goetz@6458 1179 assert(arg_2 != R4_ARG2, "smashed argument");
goetz@6458 1180 mr_if_needed(R5_ARG3, arg_2);
goetz@6458 1181 call_VM(oop_result, entry_point, check_exceptions);
goetz@6458 1182 }
goetz@6458 1183
goetz@7424 1184 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
goetz@7424 1185 bool check_exceptions) {
goetz@7424 1186 // R3_ARG1 is reserved for the thread
goetz@7424 1187 mr_if_needed(R4_ARG2, arg_1);
goetz@7424 1188 assert(arg_2 != R4_ARG2, "smashed argument");
goetz@7424 1189 mr_if_needed(R5_ARG3, arg_2);
goetz@7424 1190 mr_if_needed(R6_ARG4, arg_3);
goetz@7424 1191 call_VM(oop_result, entry_point, check_exceptions);
goetz@7424 1192 }
goetz@7424 1193
goetz@6458 1194 void MacroAssembler::call_VM_leaf(address entry_point) {
goetz@6458 1195 call_VM_leaf_base(entry_point);
goetz@6458 1196 }
goetz@6458 1197
goetz@6458 1198 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
goetz@6458 1199 mr_if_needed(R3_ARG1, arg_1);
goetz@6458 1200 call_VM_leaf(entry_point);
goetz@6458 1201 }
goetz@6458 1202
goetz@6458 1203 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
goetz@6458 1204 mr_if_needed(R3_ARG1, arg_1);
goetz@6458 1205 assert(arg_2 != R3_ARG1, "smashed argument");
goetz@6458 1206 mr_if_needed(R4_ARG2, arg_2);
goetz@6458 1207 call_VM_leaf(entry_point);
goetz@6458 1208 }
goetz@6458 1209
goetz@6458 1210 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
goetz@6458 1211 mr_if_needed(R3_ARG1, arg_1);
goetz@6458 1212 assert(arg_2 != R3_ARG1, "smashed argument");
goetz@6458 1213 mr_if_needed(R4_ARG2, arg_2);
goetz@6458 1214 assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
goetz@6458 1215 mr_if_needed(R5_ARG3, arg_3);
goetz@6458 1216 call_VM_leaf(entry_point);
goetz@6458 1217 }
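
// Illustrative note (not in the original source): the argument shuffles above
// are order-sensitive. For a hypothetical call_VM_leaf(fn, Rx, Ry), the emitted
// moves are mr(R3_ARG1, Rx); mr(R4_ARG2, Ry), so Ry must not be R3_ARG1 or the
// first move would smash it. That is exactly what the asserts guard against.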
goetz@6458 1218
goetz@6458 1219 // Check whether instruction is a read access to the polling page
goetz@6458 1220 // which was emitted by load_from_polling_page(..).
goetz@6458 1221 bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
goetz@6458 1222 address* polling_address_ptr) {
goetz@6458 1223 if (!is_ld(instruction))
goetz@6458 1224 return false; // It's not a ld. Fail.
goetz@6458 1225
goetz@6458 1226 int rt = inv_rt_field(instruction);
goetz@6458 1227 int ra = inv_ra_field(instruction);
goetz@6458 1228 int ds = inv_ds_field(instruction);
goetz@6458 1229 if (!(ds == 0 && ra != 0 && rt == 0)) {
goetz@6458 1230 return false; // It's not a ld(r0, 0, ra). Fail.
goetz@6458 1231 }
goetz@6458 1232
goetz@6458 1233 if (!ucontext) {
goetz@6458 1234 // Set polling address.
goetz@6458 1235 if (polling_address_ptr != NULL) {
goetz@6458 1236 *polling_address_ptr = NULL;
goetz@6458 1237 }
goetz@6458 1238 return true; // No ucontext given. Can't check value of ra. Assume true.
goetz@6458 1239 }
goetz@6458 1240
goetz@6458 1241 #ifdef LINUX
goetz@6458 1242 // Ucontext given. Check that register ra contains the address of
goetz@6458 1243 // the safepoint polling page.
goetz@6458 1244 ucontext_t* uc = (ucontext_t*) ucontext;
goetz@6458 1245 // Set polling address.
goetz@6458 1246 address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
goetz@6458 1247 if (polling_address_ptr != NULL) {
goetz@6458 1248 *polling_address_ptr = addr;
goetz@6458 1249 }
goetz@6458 1250 return os::is_poll_address(addr);
goetz@6458 1251 #else
goetz@6458 1252 // Not on Linux, ucontext must be NULL.
goetz@6458 1253 ShouldNotReachHere();
goetz@6458 1254 return false;
goetz@6458 1255 #endif
goetz@6458 1256 }
goetz@6458 1257
goetz@6458 1258 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
goetz@6458 1259 #ifdef LINUX
goetz@6458 1260 ucontext_t* uc = (ucontext_t*) ucontext;
goetz@6458 1261
goetz@6458 1262 if (is_stwx(instruction) || is_stwux(instruction)) {
goetz@6458 1263 int ra = inv_ra_field(instruction);
goetz@6458 1264 int rb = inv_rb_field(instruction);
goetz@6458 1265
goetz@6458 1266 // look up content of ra and rb in ucontext
goetz@6458 1267 address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
goetz@6458 1268 long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
goetz@6458 1269 return os::is_memory_serialize_page(thread, ra_val+rb_val);
goetz@6458 1270 } else if (is_stw(instruction) || is_stwu(instruction)) {
goetz@6458 1271 int ra = inv_ra_field(instruction);
goetz@6458 1272 int d1 = inv_d1_field(instruction);
goetz@6458 1273
goetz@6458 1274 // look up content of ra in ucontext
goetz@6458 1275 address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
goetz@6458 1276 return os::is_memory_serialize_page(thread, ra_val+d1);
goetz@6458 1277 } else {
goetz@6458 1278 return false;
goetz@6458 1279 }
goetz@6458 1280 #else
goetz@6458 1281 // workaround not needed on !LINUX :-)
goetz@6458 1282 ShouldNotCallThis();
goetz@6458 1283 return false;
goetz@6458 1284 #endif
goetz@6458 1285 }
goetz@6458 1286
goetz@6458 1287 void MacroAssembler::bang_stack_with_offset(int offset) {
goetz@6458 1288 // When increasing the stack, the old stack pointer will be written
goetz@6458 1289 // to the new top of stack according to the PPC64 ABI.
goetz@6458 1290 // Therefore, stack banging is not necessary when increasing
goetz@6458 1291 // the stack by <= os::vm_page_size() bytes.
goetz@6458 1292 // When increasing the stack by a larger amount, this method is
goetz@6458 1293 // called repeatedly to bang the intermediate pages.
goetz@6458 1294
goetz@6458 1295 // Stack grows down, caller passes positive offset.
goetz@6458 1296 assert(offset > 0, "must bang with positive offset");
goetz@6458 1297
goetz@6458 1298 long stdoffset = -offset;
goetz@6458 1299
goetz@6458 1300 if (is_simm(stdoffset, 16)) {
goetz@6458 1301 // Signed 16 bit offset, a simple std is ok.
goetz@6458 1302 if (UseLoadInstructionsForStackBangingPPC64) {
goetz@6458 1303 ld(R0, (int)(signed short)stdoffset, R1_SP);
goetz@6458 1304 } else {
goetz@6458 1305 std(R0, (int)(signed short)stdoffset, R1_SP);
goetz@6458 1306 }
goetz@6458 1307 } else if (is_simm(stdoffset, 31)) {
goetz@6458 1308 const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
goetz@6458 1309 const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
goetz@6458 1310
goetz@6458 1311 Register tmp = R11;
goetz@6458 1312 addis(tmp, R1_SP, hi);
goetz@6458 1313 if (UseLoadInstructionsForStackBangingPPC64) {
goetz@6458 1314 ld(R0, lo, tmp);
goetz@6458 1315 } else {
goetz@6458 1316 std(R0, lo, tmp);
goetz@6458 1317 }
goetz@6458 1318 } else {
goetz@6458 1319 ShouldNotReachHere();
goetz@6458 1320 }
goetz@6458 1321 }
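
// Illustrative arithmetic for the large-offset case above: for, e.g.,
// offset == 0x12345, stdoffset == -0x12345 does not fit a signed 16-bit
// immediate. hi and lo are chosen so that (hi << 16) + lo == stdoffset with
// lo a signed 16-bit value; addis(tmp, R1_SP, hi) plus the 16-bit
// displacement lo then reconstructs the full offset from R1_SP.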
goetz@6458 1322
goetz@6458 1323 // If instruction is a stack bang of the form
goetz@6458 1324 // std R0, x(Ry), (see bang_stack_with_offset())
goetz@6458 1325 // stdu R1_SP, x(R1_SP), (see push_frame(), resize_frame())
goetz@6458 1326 // or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame())
goetz@6458 1327 // return the banged address. Otherwise, return 0.
goetz@6458 1328 address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
goetz@6458 1329 #ifdef LINUX
goetz@6458 1330 ucontext_t* uc = (ucontext_t*) ucontext;
goetz@6458 1331 int rs = inv_rs_field(instruction);
goetz@6458 1332 int ra = inv_ra_field(instruction);
goetz@6458 1333 if ( (is_ld(instruction) && rs == 0 && UseLoadInstructionsForStackBangingPPC64)
goetz@6458 1334 || (is_std(instruction) && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
goetz@6458 1335 || (is_stdu(instruction) && rs == 1)) {
goetz@6458 1336 int ds = inv_ds_field(instruction);
goetz@6458 1337 // return banged address
goetz@6458 1338 return ds+(address)uc->uc_mcontext.regs->gpr[ra];
goetz@6458 1339 } else if (is_stdux(instruction) && rs == 1) {
goetz@6458 1340 int rb = inv_rb_field(instruction);
goetz@6458 1341 address sp = (address)uc->uc_mcontext.regs->gpr[1];
goetz@6458 1342 long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
goetz@6458 1343 return ra != 1 || rb_val >= 0 ? NULL // not a stack bang
goetz@6458 1344 : sp + rb_val; // banged address
goetz@6458 1345 }
goetz@6458 1346 return NULL; // not a stack bang
goetz@6458 1347 #else
goetz@6458 1348 // workaround not needed on !LINUX :-)
goetz@6458 1349 ShouldNotCallThis();
goetz@6458 1350 return NULL;
goetz@6458 1351 #endif
goetz@6458 1352 }
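
// Sketch of the stdux case above (illustrative): stdu/stdux bang the *new* SP,
// so with stdux R1_SP, R1_SP, Rb the banged address is sp + gpr[rb]. Since the
// stack grows down, only a negative rb_val with ra == 1 qualifies; anything
// else is not a stack bang and NULL is returned.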
goetz@6458 1353
goetz@6458 1354 // CmpxchgX sets condition register to cmpX(current, compare).
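// A C-like sketch of the emitted loop, ignoring hints and the optional result
// register (illustrative):
//   retry: current = lwarx(addr_base);              // load word, take reservation
//          if (current != compare_value) goto failed;
//          if (!stwcx_(exchange_value, addr_base))  // reservation lost?
//            goto retry;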
goetz@6458 1355 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
goetz@6458 1356 Register compare_value, Register exchange_value,
goetz@6458 1357 Register addr_base, int semantics, bool cmpxchgx_hint,
goetz@6458 1358 Register int_flag_success, bool contention_hint) {
goetz@6458 1359 Label retry;
goetz@6458 1360 Label failed;
goetz@6458 1361 Label done;
goetz@6458 1362
goetz@6458 1363 // Save one branch if result is returned via register and
goetz@6458 1364 // result register is different from the other ones.
goetz@6458 1365 bool use_result_reg = (int_flag_success != noreg);
goetz@6458 1366 bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
goetz@6458 1367 int_flag_success != exchange_value && int_flag_success != addr_base);
goetz@6458 1368
goetz@6458 1369 // release/fence semantics
goetz@6458 1370 if (semantics & MemBarRel) {
goetz@6458 1371 release();
goetz@6458 1372 }
goetz@6458 1373
goetz@6458 1374 if (use_result_reg && preset_result_reg) {
goetz@6458 1375 li(int_flag_success, 0); // preset (assume cas failed)
goetz@6458 1376 }
goetz@6458 1377
goetz@6458 1378 // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
goetz@6458 1379 if (contention_hint) { // Don't try to reserve if cmp fails.
goetz@6458 1380 lwz(dest_current_value, 0, addr_base);
goetz@6458 1381 cmpw(flag, dest_current_value, compare_value);
goetz@6458 1382 bne(flag, failed);
goetz@6458 1383 }
goetz@6458 1384
goetz@6458 1385 // atomic emulation loop
goetz@6458 1386 bind(retry);
goetz@6458 1387
goetz@6458 1388 lwarx(dest_current_value, addr_base, cmpxchgx_hint);
goetz@6458 1389 cmpw(flag, dest_current_value, compare_value);
goetz@6458 1390 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458 1391 bne_predict_not_taken(flag, failed);
goetz@6458 1392 } else {
goetz@6458 1393 bne( flag, failed);
goetz@6458 1394 }
goetz@6458 1395 // branch to failed => (flag == ne), (dest_current_value != compare_value)
goetz@6458 1396 // fall through => (flag == eq), (dest_current_value == compare_value)
goetz@6458 1397
goetz@6458 1398 stwcx_(exchange_value, addr_base);
goetz@6458 1399 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458 1400 bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
goetz@6458 1401 } else {
goetz@6458 1402 bne( CCR0, retry); // StXcx_ sets CCR0.
goetz@6458 1403 }
goetz@6458 1404 // fall through => (flag == eq), (dest_current_value == compare_value), (swapped)
goetz@6458 1405
goetz@6458 1406 // Result in register (must do this at the end because int_flag_success can be the
goetz@6458 1407 // same register as one above).
goetz@6458 1408 if (use_result_reg) {
goetz@6458 1409 li(int_flag_success, 1);
goetz@6458 1410 }
goetz@6458 1411
goetz@6458 1412 if (semantics & MemBarFenceAfter) {
goetz@6458 1413 fence();
goetz@6458 1414 } else if (semantics & MemBarAcq) {
goetz@6458 1415 isync();
goetz@6458 1416 }
goetz@6458 1417
goetz@6458 1418 if (use_result_reg && !preset_result_reg) {
goetz@6458 1419 b(done);
goetz@6458 1420 }
goetz@6458 1421
goetz@6458 1422 bind(failed);
goetz@6458 1423 if (use_result_reg && !preset_result_reg) {
goetz@6458 1424 li(int_flag_success, 0);
goetz@6458 1425 }
goetz@6458 1426
goetz@6458 1427 bind(done);
goetz@6458 1428 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6458 1429 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6458 1430 }
goetz@6458 1431
goetz@6458 1432 // Performs an atomic compare-exchange:
goetz@6458 1433 // if (compare_value == *addr_base)
goetz@6458 1434 // *addr_base = exchange_value
goetz@6458 1435 // int_flag_success = 1;
goetz@6458 1436 // else
goetz@6458 1437 // int_flag_success = 0;
goetz@6458 1438 //
goetz@6458 1439 // ConditionRegister flag = cmp(compare_value, *addr_base)
goetz@6458 1440 // Register dest_current_value = *addr_base
goetz@6458 1441 // Register compare_value Used to compare with value in memory
goetz@6458 1442 // Register exchange_value Written to memory if compare_value == *addr_base
goetz@6458 1443 // Register addr_base The memory location to compareXChange
goetz@6458 1444 // Register int_flag_success Set to 1 if exchange_value was written to *addr_base
goetz@6458 1445 //
goetz@6458 1446 // To avoid the costly compare-exchange, the value can be tested beforehand (contention_hint).
goetz@6458 1447 // Several special cases exist to avoid generating unnecessary code.
goetz@6458 1448 //
goetz@6458 1449 void MacroAssembler::cmpxchgd(ConditionRegister flag,
goetz@6458 1450 Register dest_current_value, Register compare_value, Register exchange_value,
goetz@6458 1451 Register addr_base, int semantics, bool cmpxchgx_hint,
goetz@6458 1452 Register int_flag_success, Label* failed_ext, bool contention_hint) {
goetz@6458 1453 Label retry;
goetz@6458 1454 Label failed_int;
goetz@6458 1455 Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
goetz@6458 1456 Label done;
goetz@6458 1457
goetz@6458 1458 // Save one branch if result is returned via register and result register is different from the other ones.
goetz@6458 1459 bool use_result_reg = (int_flag_success != noreg);
goetz@6458 1460 bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
goetz@6458 1461 int_flag_success != exchange_value && int_flag_success != addr_base);
goetz@6458 1462 assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
goetz@6458 1463
goetz@6458 1464 // release/fence semantics
goetz@6458 1465 if (semantics & MemBarRel) {
goetz@6458 1466 release();
goetz@6458 1467 }
goetz@6458 1468
goetz@6458 1469 if (use_result_reg && preset_result_reg) {
goetz@6458 1470 li(int_flag_success, 0); // preset (assume cas failed)
goetz@6458 1471 }
goetz@6458 1472
goetz@6458 1473 // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
goetz@6458 1474 if (contention_hint) { // Don't try to reserve if cmp fails.
goetz@6458 1475 ld(dest_current_value, 0, addr_base);
goetz@6458 1476 cmpd(flag, dest_current_value, compare_value);
goetz@6458 1477 bne(flag, failed);
goetz@6458 1478 }
goetz@6458 1479
goetz@6458 1480 // atomic emulation loop
goetz@6458 1481 bind(retry);
goetz@6458 1482
goetz@6458 1483 ldarx(dest_current_value, addr_base, cmpxchgx_hint);
goetz@6458 1484 cmpd(flag, dest_current_value, compare_value);
goetz@6458 1485 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458 1486 bne_predict_not_taken(flag, failed);
goetz@6458 1487 } else {
goetz@6458 1488 bne( flag, failed);
goetz@6458 1489 }
goetz@6458 1490
goetz@6458 1491 stdcx_(exchange_value, addr_base);
goetz@6458 1492 if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
goetz@6458 1493 bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
goetz@6458 1494 } else {
goetz@6458 1495 bne( CCR0, retry); // stXcx_ sets CCR0
goetz@6458 1496 }
goetz@6458 1497
goetz@6458 1498 // result in register (must do this at the end because int_flag_success can be the same register as one above)
goetz@6458 1499 if (use_result_reg) {
goetz@6458 1500 li(int_flag_success, 1);
goetz@6458 1501 }
goetz@6458 1502
goetz@6458 1503 // POWER6 doesn't strictly need an isync after CAS,
goetz@6458 1504 // but always emit one to be on the safe side.
goetz@6458 1505 if (semantics & MemBarFenceAfter) {
goetz@6458 1506 fence();
goetz@6458 1507 } else if (semantics & MemBarAcq) {
goetz@6458 1508 isync();
goetz@6458 1509 }
goetz@6458 1510
goetz@6458 1511 if (use_result_reg && !preset_result_reg) {
goetz@6458 1512 b(done);
goetz@6458 1513 }
goetz@6458 1514
goetz@6458 1515 bind(failed_int);
goetz@6458 1516 if (use_result_reg && !preset_result_reg) {
goetz@6458 1517 li(int_flag_success, 0);
goetz@6458 1518 }
goetz@6458 1519
goetz@6458 1520 bind(done);
goetz@6458 1521 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6458 1522 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6458 1523 }
goetz@6458 1524
goetz@6458 1525 // Look up the method for a megamorphic invokeinterface call.
goetz@6458 1526 // The target method is determined by <intf_klass, itable_index>.
goetz@6458 1527 // The receiver klass is in recv_klass.
goetz@6458 1528 // On success, the result will be in method_result, and execution falls through.
goetz@6458 1529 // On failure, execution transfers to the given label.
goetz@6458 1530 void MacroAssembler::lookup_interface_method(Register recv_klass,
goetz@6458 1531 Register intf_klass,
goetz@6458 1532 RegisterOrConstant itable_index,
goetz@6458 1533 Register method_result,
goetz@6458 1534 Register scan_temp,
mdoerr@9034 1535 Register temp2,
mdoerr@9034 1536 Label& L_no_such_interface,
mdoerr@9034 1537 bool return_method) {
goetz@6458 1538 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
goetz@6458 1539
goetz@6458 1540 // Compute start of first itableOffsetEntry (which is at the end of the vtable).
goetz@6458 1541 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
goetz@6458 1542 int itentry_off = itableMethodEntry::method_offset_in_bytes();
goetz@6458 1543 int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
goetz@6458 1544 int scan_step = itableOffsetEntry::size() * wordSize;
goetz@6458 1545 int log_vte_size = exact_log2(vtableEntry::size() * wordSize);
goetz@6458 1546
goetz@6458 1547 lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
goetz@6458 1548 // %%% We should store the aligned, prescaled offset in the klassOop.
goetz@6458 1549 // Then the next several instructions would fold away.
goetz@6458 1550
goetz@6458 1551 sldi(scan_temp, scan_temp, log_vte_size);
goetz@6458 1552 addi(scan_temp, scan_temp, vtable_base);
goetz@6458 1553 add(scan_temp, recv_klass, scan_temp);
goetz@6458 1554
goetz@6458 1555 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
mdoerr@9034 1556 if (return_method) {
mdoerr@9034 1557 if (itable_index.is_register()) {
mdoerr@9034 1558 Register itable_offset = itable_index.as_register();
mdoerr@9034 1559 sldi(method_result, itable_offset, logMEsize);
mdoerr@9034 1560 if (itentry_off) { addi(method_result, method_result, itentry_off); }
mdoerr@9034 1561 add(method_result, method_result, recv_klass);
mdoerr@9034 1562 } else {
mdoerr@9034 1563 long itable_offset = (long)itable_index.as_constant();
mdoerr@9034 1564 // static address, no relocation
mdoerr@9034 1565 load_const_optimized(temp2, (itable_offset << logMEsize) + itentry_off);
mdoerr@9034 1566 add(method_result, temp2, recv_klass);
mdoerr@9034 1567 }
goetz@6458 1568 }
goetz@6458 1569
goetz@6458 1570 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
goetz@6458 1571 // if (scan->interface() == intf) {
goetz@6458 1572 // result = (klass + scan->offset() + itable_index);
goetz@6458 1573 // }
goetz@6458 1574 // }
goetz@6458 1575 Label search, found_method;
goetz@6458 1576
goetz@6458 1577 for (int peel = 1; peel >= 0; peel--) {
goetz@6458 1578 // %%%% Could load both offset and interface in one ldx, if they were
goetz@6458 1579 // in the opposite order. This would save a load.
mdoerr@9034 1580 ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
goetz@6458 1581
goetz@6458 1582 // Check that this entry is non-null. A null entry means that
goetz@6458 1583 // the receiver class doesn't implement the interface, and wasn't the
goetz@6458 1584 // same as when the caller was compiled.
mdoerr@9034 1585 cmpd(CCR0, temp2, intf_klass);
goetz@6458 1586
goetz@6458 1587 if (peel) {
goetz@6458 1588 beq(CCR0, found_method);
goetz@6458 1589 } else {
goetz@6458 1590 bne(CCR0, search);
goetz@6458 1591 // (invert the test to fall through to found_method...)
goetz@6458 1592 }
goetz@6458 1593
goetz@6458 1594 if (!peel) break;
goetz@6458 1595
goetz@6458 1596 bind(search);
goetz@6458 1597
mdoerr@9034 1598 cmpdi(CCR0, temp2, 0);
goetz@6458 1599 beq(CCR0, L_no_such_interface);
goetz@6458 1600 addi(scan_temp, scan_temp, scan_step);
goetz@6458 1601 }
goetz@6458 1602
goetz@6458 1603 bind(found_method);
goetz@6458 1604
goetz@6458 1605 // Got a hit.
mdoerr@9034 1606 if (return_method) {
mdoerr@9034 1607 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
mdoerr@9034 1608 lwz(scan_temp, ito_offset, scan_temp);
mdoerr@9034 1609 ldx(method_result, scan_temp, method_result);
mdoerr@9034 1610 }
goetz@6458 1611 }
goetz@6458 1612
goetz@6458 1613 // virtual method calling
goetz@6458 1614 void MacroAssembler::lookup_virtual_method(Register recv_klass,
goetz@6458 1615 RegisterOrConstant vtable_index,
goetz@6458 1616 Register method_result) {
goetz@6458 1617
goetz@6458 1618 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
goetz@6458 1619
goetz@6458 1620 const int base = InstanceKlass::vtable_start_offset() * wordSize;
goetz@6458 1621 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
goetz@6458 1622
goetz@6458 1623 if (vtable_index.is_register()) {
goetz@6458 1624 sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
goetz@6458 1625 add(recv_klass, vtable_index.as_register(), recv_klass);
goetz@6458 1626 } else {
goetz@6458 1627 addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
goetz@6458 1628 }
goetz@6458 1629 ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
goetz@6458 1630 }
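
// Address computation above, as a sketch (illustrative):
//   R19_method = *(recv_klass + vtable_start + vtable_index * wordSize
//                  + vtableEntry::method_offset_in_bytes());
// Note that recv_klass is clobbered to form the entry address and that the
// loaded method always lands in R19_method, regardless of method_result.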
goetz@6458 1631
goetz@6458 1632 /////////////////////////////////////////// subtype checking ////////////////////////////////////////////
goetz@6458 1633
goetz@6458 1634 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
goetz@6458 1635 Register super_klass,
goetz@6458 1636 Register temp1_reg,
goetz@6458 1637 Register temp2_reg,
goetz@6458 1638 Label& L_success,
goetz@6458 1639 Label& L_failure) {
goetz@6458 1640
goetz@6458 1641 const Register check_cache_offset = temp1_reg;
goetz@6458 1642 const Register cached_super = temp2_reg;
goetz@6458 1643
goetz@6458 1644 assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
goetz@6458 1645
goetz@6458 1646 int sco_offset = in_bytes(Klass::super_check_offset_offset());
goetz@6458 1647 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
goetz@6458 1648
goetz@6458 1649 // If the pointers are equal, we are done (e.g., String[] elements).
goetz@6458 1650 // This self-check enables sharing of secondary supertype arrays among
goetz@6458 1651 // non-primary types such as array-of-interface. Otherwise, each such
goetz@6458 1652 // type would need its own customized SSA.
goetz@6458 1653 // We move this check to the front of the fast path because many
goetz@6458 1654 // type checks are in fact trivially successful in this manner,
goetz@6458 1655 // so we get a nicely predicted branch right at the start of the check.
goetz@6458 1656 cmpd(CCR0, sub_klass, super_klass);
goetz@6458 1657 beq(CCR0, L_success);
goetz@6458 1658
goetz@6458 1659 // Check the supertype display:
goetz@6458 1660 lwz(check_cache_offset, sco_offset, super_klass);
goetz@6458 1661 // The loaded value is the offset from KlassOopDesc.
goetz@6458 1662
goetz@6458 1663 ldx(cached_super, check_cache_offset, sub_klass);
goetz@6458 1664 cmpd(CCR0, cached_super, super_klass);
goetz@6458 1665 beq(CCR0, L_success);
goetz@6458 1666
goetz@6458 1667 // This check has worked decisively for primary supers.
goetz@6458 1668 // Secondary supers are sought in the super_cache ('super_cache_addr').
goetz@6458 1669 // (Secondary supers are interfaces and very deeply nested subtypes.)
goetz@6458 1670 // The same check above covers them because of a tricky aliasing
goetz@6458 1671 // between the super_cache and the primary super display elements.
goetz@6458 1672 // (The 'super_check_addr' can address either, as the case requires.)
goetz@6458 1673 // Note that the cache is updated below if it does not help us find
goetz@6458 1674 // what we need immediately.
goetz@6458 1675 // So if it was a primary super, we can just fail immediately.
goetz@6458 1676 // Otherwise, it's the slow path for us (no success at this point).
goetz@6458 1677
goetz@6458 1678 cmpwi(CCR0, check_cache_offset, sc_offset);
goetz@6458 1679 bne(CCR0, L_failure);
goetz@6458 1680 // bind(slow_path); // fallthru
goetz@6458 1681 }
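
// The fast path above, as C-like pseudocode (illustrative):
//   if (sub_klass == super_klass)             goto L_success; // trivial hit
//   sco = super_klass->super_check_offset;
//   if (*(sub_klass + sco) == super_klass)    goto L_success; // display/cache hit
//   if (sco != secondary_super_cache_offset)  goto L_failure; // primary super, miss
//   /* otherwise fall through to the slow path */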
goetz@6458 1682
goetz@6458 1683 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
goetz@6458 1684 Register super_klass,
goetz@6458 1685 Register temp1_reg,
goetz@6458 1686 Register temp2_reg,
goetz@6458 1687 Label* L_success,
goetz@6458 1688 Register result_reg) {
goetz@6458 1689 const Register array_ptr = temp1_reg; // current value from cache array
goetz@6458 1690 const Register temp = temp2_reg;
goetz@6458 1691
goetz@6458 1692 assert_different_registers(sub_klass, super_klass, array_ptr, temp);
goetz@6458 1693
goetz@6458 1694 int source_offset = in_bytes(Klass::secondary_supers_offset());
goetz@6458 1695 int target_offset = in_bytes(Klass::secondary_super_cache_offset());
goetz@6458 1696
goetz@6458 1697 int length_offset = Array<Klass*>::length_offset_in_bytes();
goetz@6458 1698 int base_offset = Array<Klass*>::base_offset_in_bytes();
goetz@6458 1699
goetz@6458 1700 Label hit, loop, failure, fallthru;
goetz@6458 1701
goetz@6458 1702 ld(array_ptr, source_offset, sub_klass);
goetz@6458 1703
goetz@6458 1704 //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
goetz@6458 1705 lwz(temp, length_offset, array_ptr);
goetz@6458 1706 cmpwi(CCR0, temp, 0);
goetz@6458 1707 beq(CCR0, result_reg != noreg ? failure : fallthru); // length 0
goetz@6458 1708
goetz@6458 1709 mtctr(temp); // load ctr
goetz@6458 1710
goetz@6458 1711 bind(loop);
goetz@6458 1712 // Entries in the table are no longer compressed.
goetz@6458 1713 ld(temp, base_offset, array_ptr);
goetz@6458 1714 cmpd(CCR0, temp, super_klass);
goetz@6458 1715 beq(CCR0, hit);
goetz@6458 1716 addi(array_ptr, array_ptr, BytesPerWord);
goetz@6458 1717 bdnz(loop);
goetz@6458 1718
goetz@6458 1719 bind(failure);
goetz@6458 1720 if (result_reg != noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
goetz@6458 1721 b(fallthru);
goetz@6458 1722
goetz@6458 1723 bind(hit);
goetz@6458 1724 std(super_klass, target_offset, sub_klass); // save result to cache
goetz@6458 1725 if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
goetz@6458 1726 if (L_success != NULL) b(*L_success);
goetz@6458 1727
goetz@6458 1728 bind(fallthru);
goetz@6458 1729 }
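
// The slow path above, sketched in C (illustrative):
//   Array<Klass*>* ss = sub_klass->secondary_supers;
//   for (int i = 0; i < ss->length(); i++) {
//     if (ss->at(i) == super_klass) {
//       sub_klass->secondary_super_cache = super_klass; // remember the hit
//       if (result_reg != noreg) result = 0;            // 0 indicates a hit
//       goto success;
//     }
//   }
//   if (result_reg != noreg) result = 1;                // non-zero indicates a miss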
goetz@6458 1730
goetz@6458 1731 // Try fast path, then go to slow one if not successful
goetz@6458 1732 void MacroAssembler::check_klass_subtype(Register sub_klass,
goetz@6458 1733 Register super_klass,
goetz@6458 1734 Register temp1_reg,
goetz@6458 1735 Register temp2_reg,
goetz@6458 1736 Label& L_success) {
goetz@6458 1737 Label L_failure;
goetz@6458 1738 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
goetz@6458 1739 check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
goetz@6458 1740 bind(L_failure); // Fallthru if not successful.
goetz@6458 1741 }
goetz@6458 1742
goetz@6458 1743 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
goetz@6458 1744 Register temp_reg,
goetz@6458 1745 Label& wrong_method_type) {
goetz@6458 1746 assert_different_registers(mtype_reg, mh_reg, temp_reg);
goetz@6458 1747 // Compare method type against that of the receiver.
goetz@6458 1748 load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
goetz@6458 1749 cmpd(CCR0, temp_reg, mtype_reg);
goetz@6458 1750 bne(CCR0, wrong_method_type);
goetz@6458 1751 }
goetz@6458 1752
goetz@6458 1753 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
goetz@6458 1754 Register temp_reg,
goetz@6458 1755 int extra_slot_offset) {
goetz@6458 1756 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
goetz@6458 1757 int stackElementSize = Interpreter::stackElementSize;
goetz@6458 1758 int offset = extra_slot_offset * stackElementSize;
goetz@6458 1759 if (arg_slot.is_constant()) {
goetz@6458 1760 offset += arg_slot.as_constant() * stackElementSize;
goetz@6458 1761 return offset;
goetz@6458 1762 } else {
goetz@6458 1763 assert(temp_reg != noreg, "must specify");
goetz@6458 1764 sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
goetz@6458 1765 if (offset != 0)
goetz@6458 1766 addi(temp_reg, temp_reg, offset);
goetz@6458 1767 return temp_reg;
goetz@6458 1768 }
goetz@6458 1769 }
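
// The returned value encodes (illustrative):
//   offset = (arg_slot + extra_slot_offset) * Interpreter::stackElementSize
// folded to a constant when arg_slot is a constant, or computed into temp_reg
// (sldi plus an optional addi) when arg_slot is a register.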
goetz@6458 1770
goetz@6458 1771 void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
goetz@6458 1772 Register mark_reg, Register temp_reg,
goetz@6458 1773 Register temp2_reg, Label& done, Label* slow_case) {
goetz@6458 1774 assert(UseBiasedLocking, "why call this otherwise?");
goetz@6458 1775
goetz@6458 1776 #ifdef ASSERT
goetz@6458 1777 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
goetz@6458 1778 #endif
goetz@6458 1779
goetz@6458 1780 Label cas_label;
goetz@6458 1781
goetz@6458 1782 // Branch to done if fast path fails and no slow_case provided.
goetz@6458 1783 Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
goetz@6458 1784
goetz@6458 1785 // Biased locking
goetz@6458 1786 // See whether the lock is currently biased toward our thread and
goetz@6458 1787 // whether the epoch is still valid
goetz@6458 1788 // Note that the runtime guarantees sufficient alignment of JavaThread
goetz@6458 1789 // pointers to allow age to be placed into low bits
goetz@6458 1790 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
goetz@6458 1791 "biased locking makes assumptions about bit layout");
goetz@6458 1792
goetz@6458 1793 if (PrintBiasedLockingStatistics) {
goetz@6458 1794 load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
goetz@6458 1795 lwz(temp2_reg, 0, temp_reg);
goetz@6458 1796 addi(temp2_reg, temp2_reg, 1);
goetz@6458 1797 stw(temp2_reg, 0, temp_reg);
goetz@6458 1798 }
goetz@6458 1799
goetz@6458 1800 andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
goetz@6458 1801 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
goetz@6458 1802 bne(cr_reg, cas_label);
goetz@6458 1803
goetz@6515 1804 load_klass(temp_reg, obj_reg);
goetz@6458 1805
goetz@6458 1806 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
goetz@6458 1807 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
goetz@6458 1808 orr(temp_reg, R16_thread, temp_reg);
goetz@6458 1809 xorr(temp_reg, mark_reg, temp_reg);
goetz@6458 1810 andr(temp_reg, temp_reg, temp2_reg);
goetz@6458 1811 cmpdi(cr_reg, temp_reg, 0);
goetz@6458 1812 if (PrintBiasedLockingStatistics) {
goetz@6458 1813 Label l;
goetz@6458 1814 bne(cr_reg, l);
goetz@6458 1815 load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
goetz@6458 1816 lwz(temp2_reg, 0, mark_reg);
goetz@6458 1817 addi(temp2_reg, temp2_reg, 1);
goetz@6458 1818 stw(temp2_reg, 0, mark_reg);
goetz@6458 1819 // restore mark_reg
goetz@6458 1820 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
goetz@6458 1821 bind(l);
goetz@6458 1822 }
goetz@6458 1823 beq(cr_reg, done);
goetz@6458 1824
goetz@6458 1825 Label try_revoke_bias;
goetz@6458 1826 Label try_rebias;
goetz@6458 1827
goetz@6458 1828 // At this point we know that the header has the bias pattern and
goetz@6458 1829 // that we are not the bias owner in the current epoch. We need to
goetz@6458 1830 // figure out more details about the state of the header in order to
goetz@6458 1831 // know what operations can be legally performed on the object's
goetz@6458 1832 // header.
goetz@6458 1833
goetz@6458 1834 // If the low three bits in the xor result aren't clear, that means
goetz@6458 1835 // the prototype header is no longer biased and we have to revoke
goetz@6458 1836 // the bias on this object.
goetz@6458 1837 andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
goetz@6458 1838 cmpwi(cr_reg, temp2_reg, 0);
goetz@6458 1839 bne(cr_reg, try_revoke_bias);
goetz@6458 1840
goetz@6458 1841 // Biasing is still enabled for this data type. See whether the
goetz@6458 1842 // epoch of the current bias is still valid, meaning that the epoch
goetz@6458 1843 // bits of the mark word are equal to the epoch bits of the
goetz@6458 1844 // prototype header. (Note that the prototype header's epoch bits
goetz@6458 1845 // only change at a safepoint.) If not, attempt to rebias the object
goetz@6458 1846 // toward the current thread. Note that we must be absolutely sure
goetz@6458 1847 // that the current epoch is invalid in order to do this because
goetz@6458 1848 // otherwise the manipulations it performs on the mark word are
goetz@6458 1849 // illegal.
goetz@6458 1850
goetz@6458 1851 int shift_amount = 64 - markOopDesc::epoch_shift;
goetz@6458 1852 // rotate epoch bits to right (little) end and set other bits to 0
goetz@6458 1853 // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
goetz@6458 1854 rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
goetz@6458 1855 // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
goetz@6458 1856 bne(CCR0, try_rebias);
goetz@6458 1857
goetz@6458 1858 // The epoch of the current bias is still valid but we know nothing
goetz@6458 1859 // about the owner; it might be set or it might be clear. Try to
goetz@6458 1860 // acquire the bias of the object using an atomic operation. If this
goetz@6458 1861 // fails we will go in to the runtime to revoke the object's bias.
goetz@6458 1862 // Note that we first construct the presumed unbiased header so we
goetz@6458 1863 // don't accidentally blow away another thread's valid bias.
goetz@6458 1864 andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
goetz@6458 1865 markOopDesc::age_mask_in_place |
goetz@6458 1866 markOopDesc::epoch_mask_in_place));
goetz@6458 1867 orr(temp_reg, R16_thread, mark_reg);
goetz@6458 1868
goetz@6458 1869 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458 1870
goetz@6458 1871 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
goetz@6458 1872 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
goetz@6458 1873 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
goetz@6458 1874 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
goetz@6458 1875 /*where=*/obj_reg,
goetz@6458 1876 MacroAssembler::MemBarAcq,
goetz@6458 1877 MacroAssembler::cmpxchgx_hint_acquire_lock(),
goetz@6458 1878 noreg, slow_case_int); // bail out if failed
goetz@6458 1879
goetz@6458 1880 // If the biasing toward our thread failed, this means that
goetz@6458 1881 // another thread succeeded in biasing it toward itself and we
goetz@6458 1882 // need to revoke that bias. The revocation will occur in the
goetz@6458 1883 // interpreter runtime in the slow case.
goetz@6458 1884 if (PrintBiasedLockingStatistics) {
goetz@6458 1885 load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
goetz@6458 1886 lwz(temp2_reg, 0, temp_reg);
goetz@6458 1887 addi(temp2_reg, temp2_reg, 1);
goetz@6458 1888 stw(temp2_reg, 0, temp_reg);
goetz@6458 1889 }
goetz@6458 1890 b(done);
goetz@6458 1891
goetz@6458 1892 bind(try_rebias);
goetz@6458 1893 // At this point we know the epoch has expired, meaning that the
goetz@6458 1894 // current "bias owner", if any, is actually invalid. Under these
goetz@6458 1895 // circumstances _only_, we are allowed to use the current header's
goetz@6458 1896 // value as the comparison value when doing the cas to acquire the
goetz@6458 1897 // bias in the current epoch. In other words, we allow transfer of
goetz@6458 1898 // the bias from one thread to another directly in this situation.
goetz@6458 1899 andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
goetz@6458 1900 orr(temp_reg, R16_thread, temp_reg);
goetz@6515 1901 load_klass(temp2_reg, obj_reg);
goetz@6458 1902 ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
goetz@6458 1903 orr(temp_reg, temp_reg, temp2_reg);
goetz@6458 1904
goetz@6458 1905 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458 1906
goetz@6458 1907 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
goetz@6458 1908 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
goetz@6458 1909 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
goetz@6458 1910 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
goetz@6458 1911 /*where=*/obj_reg,
goetz@6458 1912 MacroAssembler::MemBarAcq,
goetz@6458 1913 MacroAssembler::cmpxchgx_hint_acquire_lock(),
goetz@6458 1914 noreg, slow_case_int); // bail out if failed
goetz@6458 1915
goetz@6458 1916 // If the biasing toward our thread failed, this means that
goetz@6458 1917 // another thread succeeded in biasing it toward itself and we
goetz@6458 1918 // need to revoke that bias. The revocation will occur in the
goetz@6458 1919 // interpreter runtime in the slow case.
goetz@6458 1920 if (PrintBiasedLockingStatistics) {
goetz@6458 1921 load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
goetz@6458 1922 lwz(temp2_reg, 0, temp_reg);
goetz@6458 1923 addi(temp2_reg, temp2_reg, 1);
goetz@6458 1924 stw(temp2_reg, 0, temp_reg);
goetz@6458 1925 }
goetz@6458 1926 b(done);
goetz@6458 1927
goetz@6458 1928 bind(try_revoke_bias);
goetz@6458 1929 // The prototype mark in the klass doesn't have the bias bit set any
goetz@6458 1930 // more, indicating that objects of this data type are not supposed
goetz@6458 1931 // to be biased any more. We are going to try to reset the mark of
goetz@6458 1932 // this object to the prototype value and fall through to the
goetz@6458 1933 // CAS-based locking scheme. Note that if our CAS fails, it means
goetz@6458 1934 // that another thread raced us for the privilege of revoking the
goetz@6458 1935 // bias of this particular object, so it's okay to continue in the
goetz@6458 1936 // normal locking code.
goetz@6515 1937 load_klass(temp_reg, obj_reg);
goetz@6458 1938 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
goetz@6458 1939 andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
goetz@6458 1940 orr(temp_reg, temp_reg, temp2_reg);
goetz@6458 1941
goetz@6458 1942 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458 1943
goetz@6458 1944 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
goetz@6458 1945 fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
goetz@6458 1946 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
goetz@6458 1947 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
goetz@6458 1948 /*where=*/obj_reg,
goetz@6458 1949 MacroAssembler::MemBarAcq,
goetz@6458 1950 MacroAssembler::cmpxchgx_hint_acquire_lock());
goetz@6458 1951
goetz@6458 1952 // reload markOop in mark_reg before continuing with lightweight locking
goetz@6458 1953 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
goetz@6458 1954
goetz@6458 1955 // Fall through to the normal CAS-based lock, because no matter what
goetz@6458 1956 // the result of the above CAS, some thread must have succeeded in
goetz@6458 1957 // removing the bias bit from the object's header.
goetz@6458 1958 if (PrintBiasedLockingStatistics) {
goetz@6458 1959 Label l;
goetz@6458 1960 bne(cr_reg, l);
goetz@6458 1961 load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
goetz@6458 1962 lwz(temp2_reg, 0, temp_reg);
goetz@6458 1963 addi(temp2_reg, temp2_reg, 1);
goetz@6458 1964 stw(temp2_reg, 0, temp_reg);
goetz@6458 1965 bind(l);
goetz@6458 1966 }
goetz@6458 1967
goetz@6458 1968 bind(cas_label);
goetz@6458 1969 }
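
// Decision tree implemented above, condensed (illustrative):
//   if (mark is not biased)                   goto cas_label;      // normal CAS locking
//   if (mark == (thread | prototype | age))   goto done;           // already biased to us
//   if (prototype header no longer biased)    goto try_revoke_bias;
//   if (epoch bits differ from prototype)     goto try_rebias;
//   /* anonymously biased: */ CAS in (thread | unbiased mark bits), else bail to slow_case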
goetz@6458 1970
goetz@6458 1971 void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
goetz@6458 1972 // Check for biased locking unlock case, which is a no-op
goetz@6458 1973 // Note: we do not have to check the thread ID for two reasons.
goetz@6458 1974 // First, the interpreter checks for IllegalMonitorStateException at
goetz@6458 1975 // a higher level. Second, if the bias was revoked while we held the
goetz@6458 1976 // lock, the object could not be rebiased toward another thread, so
goetz@6458 1977 // the bias bit would be clear.
goetz@6458 1978
goetz@6458 1979 ld(temp_reg, 0, mark_addr);
goetz@6458 1980 andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
goetz@6458 1981
goetz@6458 1982 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
goetz@6458 1983 beq(cr_reg, done);
goetz@6458 1984 }
goetz@6458 1985
goetz@6458 1986 // "The box" is the space on the stack where we copy the object mark.
goetz@6458 1987 void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
goetz@6458 1988 Register temp, Register displaced_header, Register current_header) {
goetz@6458 1989 assert_different_registers(oop, box, temp, displaced_header, current_header);
goetz@6458 1990 assert(flag != CCR0, "bad condition register");
goetz@6458 1991 Label cont;
goetz@6458 1992 Label object_has_monitor;
goetz@6458 1993 Label cas_failed;
goetz@6458 1994
goetz@6458 1995 // Load markOop from object into displaced_header.
goetz@6458 1996 ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
goetz@6458 1997
goetz@6458 1999 // Always do locking in runtime.
goetz@6458 2000 if (EmitSync & 0x01) {
goetz@6458 2001 cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
goetz@6458 2002 return;
goetz@6458 2003 }
goetz@6458 2004
goetz@6458 2005 if (UseBiasedLocking) {
goetz@6458 2006 biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
goetz@6458 2007 }
goetz@6458 2008
goetz@6458 2009 // Handle existing monitor.
goetz@6458 2010 if ((EmitSync & 0x02) == 0) {
goetz@6458 2011 // The object has an existing monitor iff (mark & monitor_value) != 0.
goetz@6458 2012 andi_(temp, displaced_header, markOopDesc::monitor_value);
goetz@6458 2013 bne(CCR0, object_has_monitor);
goetz@6458 2014 }
goetz@6458 2015
goetz@6458 2016 // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
goetz@6458 2017 ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
goetz@6458 2018
goetz@6458 2020
goetz@6458 2021 // Initialize the box. (Must happen before we update the object mark!)
goetz@6458 2022 std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
goetz@6458 2023
goetz@6458 2024 // Must fence; otherwise preceding store(s) may float below the cmpxchg.
goetz@6458 2025 // Compare the object's markOop with the unlocked mark and, if equal, exchange the box address into the object's markOop.
goetz@6458 2026 // CmpxchgX sets cr_reg to cmpX(current, displaced).
goetz@6501 2027 membar(Assembler::StoreStore);
goetz@6458 2028 cmpxchgd(/*flag=*/flag,
goetz@6458 2029 /*current_value=*/current_header,
goetz@6458 2030 /*compare_value=*/displaced_header,
goetz@6458 2031 /*exchange_value=*/box,
goetz@6458 2032 /*where=*/oop,
goetz@6501 2033 MacroAssembler::MemBarAcq,
goetz@6458 2034 MacroAssembler::cmpxchgx_hint_acquire_lock(),
goetz@6458 2035 noreg,
goetz@6458 2036 &cas_failed);
goetz@6458 2037 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458 2038
goetz@6458 2039 // If the compare-and-exchange succeeded, then we found an unlocked
goetz@6458 2040 // object and we have now locked it.
goetz@6458 2041 b(cont);
goetz@6458 2042
goetz@6458 2043 bind(cas_failed);
goetz@6458 2044 // We did not see an unlocked object so try the fast recursive case.
goetz@6458 2045
goetz@6458 2046 // Check if the owner is self by comparing the value in the markOop of object
goetz@6458 2047 // (current_header) with the stack pointer.
goetz@6458 2048 sub(current_header, current_header, R1_SP);
goetz@6458 2049 load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
goetz@6458 2050 markOopDesc::lock_mask_in_place));
goetz@6458 2051
goetz@6458 2052 and_(R0/*==0?*/, current_header, temp);
goetz@6458 2053 // If the masked value is zero, the owner is our own stack, so we can store
goetz@6458 2054 // 0 as the displaced header in the box, which indicates a recursive lock.
goetz@6458 2055 mcrf(flag, CCR0);
goetz@6458 2056 std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
goetz@6458 2057
goetz@6458 2058 // Handle existing monitor.
goetz@6458 2059 if ((EmitSync & 0x02) == 0) {
goetz@6458 2060 b(cont);
goetz@6458 2061
goetz@6458 2062 bind(object_has_monitor);
goetz@6458 2063 // The object's monitor m is unlocked iff m->owner == NULL,
goetz@6458 2064 // otherwise m->owner may contain a thread or a stack address.
goetz@6458 2065 //
goetz@6458 2066 // Try to CAS m->owner from NULL to current thread.
goetz@6458 2067 addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
goetz@6458 2068 li(displaced_header, 0);
goetz@6458 2069 // CmpxchgX sets flag to cmpX(current, displaced).
goetz@6458 2070 cmpxchgd(/*flag=*/flag,
goetz@6458 2071 /*current_value=*/current_header,
goetz@6458 2072 /*compare_value=*/displaced_header,
goetz@6458 2073 /*exchange_value=*/R16_thread,
goetz@6458 2074 /*where=*/temp,
goetz@6458 2075 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
goetz@6458 2076 MacroAssembler::cmpxchgx_hint_acquire_lock());
goetz@6458 2077
goetz@6458 2078 // Store a non-null value into the box.
goetz@6458 2079 std(box, BasicLock::displaced_header_offset_in_bytes(), box);
goetz@6458 2080
goetz@6458 2081 # ifdef ASSERT
goetz@6458 2082 bne(flag, cont);
goetz@6458 2083 // We have acquired the monitor, check some invariants.
goetz@6458 2084 addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
goetz@6458 2085 // Invariant 1: _recursions should be 0.
goetz@6458 2086 //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
goetz@6458 2087 asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
goetz@6458 2088 "monitor->_recursions should be 0", -1);
goetz@6458 2089 // Invariant 2: OwnerIsThread shouldn't be 0.
goetz@6458 2090 //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
goetz@6458 2091 //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
goetz@6458 2092 // "monitor->OwnerIsThread shouldn't be 0", -1);
goetz@6458 2093 # endif
goetz@6458 2094 }
goetz@6458 2095
goetz@6458 2096 bind(cont);
goetz@6458 2097 // flag == EQ indicates success
goetz@6458 2098 // flag == NE indicates failure
goetz@6458 2099 }
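
// Condensed sketch of the fast-lock protocol above (illustrative):
//   mark = obj->mark();
//   if (mark & monitor_value)                          // inflated:
//     CAS(&monitor->owner, NULL, thread);              // flag == EQ on success
//   else {
//     box->displaced = mark | unlocked_value;
//     if (CAS(&obj->mark, mark | unlocked_value, box)) locked;  // flag == EQ
//     else if (obj->mark points into our own stack page)
//       box->displaced = 0;                            // recursive lock
//     else flag == NE;                                 // contended, go slow path
//   }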
goetz@6458 2100
goetz@6458 2101 void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
goetz@6458 2102 Register temp, Register displaced_header, Register current_header) {
goetz@6458 2103 assert_different_registers(oop, box, temp, displaced_header, current_header);
goetz@6458 2104 assert(flag != CCR0, "bad condition register");
goetz@6458 2105 Label cont;
goetz@6458 2106 Label object_has_monitor;
goetz@6458 2107
goetz@6458 2108 // Always do locking in runtime.
goetz@6458 2109 if (EmitSync & 0x01) {
goetz@6458 2110 cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
goetz@6458 2111 return;
goetz@6458 2112 }
goetz@6458 2113
goetz@6458 2114 if (UseBiasedLocking) {
goetz@6458 2115 biased_locking_exit(flag, oop, current_header, cont);
goetz@6458 2116 }
goetz@6458 2117
goetz@6458 2118 // Find the lock address and load the displaced header from the stack.
goetz@6458 2119 ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
goetz@6458 2120
goetz@6458 2121 // If the displaced header is 0, we have a recursive unlock.
goetz@6458 2122 cmpdi(flag, displaced_header, 0);
goetz@6458 2123 beq(flag, cont);
goetz@6458 2124
goetz@6458 2125 // Handle existing monitor.
goetz@6458 2126 if ((EmitSync & 0x02) == 0) {
goetz@6458 2127 // The object has an existing monitor iff (mark & monitor_value) != 0.
goetz@6458 2128 ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
goetz@6458 2129 andi(temp, current_header, markOopDesc::monitor_value);
goetz@6458 2130 cmpdi(flag, temp, 0);
goetz@6458 2131 bne(flag, object_has_monitor);
goetz@6458 2132 }
goetz@6458 2134
goetz@6458 2135 // Check whether it is still a lightweight lock: this is true if we see
goetz@6458 2136 // the stack address of the basicLock in the markOop of the object.
goetz@6458 2137 // Cmpxchg sets flag to cmpd(current_header, box).
goetz@6458 2138 cmpxchgd(/*flag=*/flag,
goetz@6458 2139 /*current_value=*/current_header,
goetz@6458 2140 /*compare_value=*/box,
goetz@6458 2141 /*exchange_value=*/displaced_header,
goetz@6458 2142 /*where=*/oop,
goetz@6458 2143 MacroAssembler::MemBarRel,
goetz@6458 2144 MacroAssembler::cmpxchgx_hint_release_lock(),
goetz@6458 2145 noreg,
goetz@6458 2146 &cont);
goetz@6458 2147
goetz@6458 2148 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
goetz@6458 2149
goetz@6458 2150 // Handle existing monitor.
goetz@6458 2151 if ((EmitSync & 0x02) == 0) {
goetz@6458 2152 b(cont);
goetz@6458 2153
goetz@6458 2154 bind(object_has_monitor);
goetz@6458 2155 addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
goetz@6458 2156 ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
goetz@6458 2157 ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
goetz@6458 2158 xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
goetz@6458 2159 orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
goetz@6458 2160 cmpdi(flag, temp, 0);
goetz@6458 2161 bne(flag, cont);
goetz@6458 2162
goetz@6458 2163 ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
goetz@6458 2164 ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
goetz@6458 2165 orr(temp, temp, displaced_header); // Will be 0 if both are 0.
goetz@6458 2166 cmpdi(flag, temp, 0);
goetz@6458 2167 bne(flag, cont);
goetz@6458 2168 release();
goetz@6458 2169 std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
goetz@6458 2170 }
goetz@6458 2171
goetz@6458 2172 bind(cont);
goetz@6458 2173 // flag == EQ indicates success
goetz@6458 2174 // flag == NE indicates failure
goetz@6458 2175 }
goetz@6458 2176
goetz@6458 2177 // Write serialization page so VM thread can do a pseudo remote membar.
goetz@6458 2178 // We use the current thread pointer to calculate a thread-specific
goetz@6458 2179 // offset to write to within the page. This minimizes bus traffic
goetz@6458 2180 // due to cache line collision.
goetz@6458 2181 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
goetz@6458 2182 srdi(tmp2, thread, os::get_serialize_page_shift_count());
goetz@6458 2183
goetz@6458 2184 int mask = os::vm_page_size() - sizeof(int);
goetz@6458 2185 if (Assembler::is_simm(mask, 16)) {
goetz@6458 2186 andi(tmp2, tmp2, mask);
goetz@6458 2187 } else {
goetz@6458 2188 lis(tmp1, (int)((signed short) (mask >> 16)));
goetz@6458 2189 ori(tmp1, tmp1, mask & 0x0000ffff);
goetz@6458 2190 andr(tmp2, tmp2, tmp1);
goetz@6458 2191 }
goetz@6458 2192
goetz@6458 2193 load_const(tmp1, (long) os::get_memory_serialize_page());
goetz@6458 2194 release();
goetz@6458 2195 stwx(R0, tmp1, tmp2);
goetz@6458 2196 }
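
// Store address computed above, as a sketch (illustrative):
//   addr = serialize_page + ((thread >> page_shift) & (page_size - sizeof(int)));
//   release(); *(int*)addr = 0;
// Distinct threads hit distinct words of the page, which keeps the
// pseudo-membar traffic off shared cache lines.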
goetz@6458 2197
goetz@6458 2198
goetz@6458 2199 // GC barrier helper macros
goetz@6458 2200
goetz@6458 2201 // Write the card table byte if needed.
goetz@6458 2202 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
goetz@6458 2203 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
goetz@6458 2204 assert(bs->kind() == BarrierSet::CardTableModRef ||
goetz@6458 2205 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
goetz@6458 2206 #ifdef ASSERT
goetz@6458 2207 cmpdi(CCR0, Rnew_val, 0);
goetz@6458 2208 asm_assert_ne("null oop not allowed", 0x321);
goetz@6458 2209 #endif
goetz@6458 2210 card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
goetz@6458 2211 }
goetz@6458 2212
goetz@6458 2213 // Write the card table byte.
goetz@6458 2214 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
goetz@6458 2215 assert_different_registers(Robj, Rtmp, R0);
goetz@6458 2216 load_const_optimized(Rtmp, (address)byte_map_base, R0);
goetz@6458 2217 srdi(Robj, Robj, CardTableModRefBS::card_shift);
goetz@6458 2218 li(R0, 0); // dirty
goetz@6501 2219 if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
goetz@6458 2220 stbx(R0, Rtmp, Robj);
goetz@6458 2221 }
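
// Equivalent C sketch of the card mark above (illustrative):
//   byte_map_base[(uintptr_t)obj >> card_shift] = 0;   // 0 == dirty_card_val()
// preceded by a StoreStore membar when running with CMS.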
goetz@6458 2222
phh@9669 2223 // Kills R31 if value is a volatile register.
phh@9669 2224 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2, bool needs_frame) {
phh@9669 2225 Label done;
phh@9669 2226 cmpdi(CCR0, value, 0);
phh@9669 2227 beq(CCR0, done); // Use NULL as-is.
phh@9669 2228
phh@9669 2229 clrrdi(tmp1, value, JNIHandles::weak_tag_size);
phh@9669 2230 #if INCLUDE_ALL_GCS
phh@9669 2231 if (UseG1GC) { andi_(tmp2, value, JNIHandles::weak_tag_mask); }
phh@9669 2232 #endif
phh@9669 2233 ld(value, 0, tmp1); // Resolve (untagged) jobject.
phh@9669 2234
phh@9669 2235 #if INCLUDE_ALL_GCS
phh@9669 2236 if (UseG1GC) {
phh@9669 2237 Label not_weak;
phh@9669 2238 beq(CCR0, not_weak); // Test for jweak tag.
phh@9669 2239 verify_oop(value);
phh@9669 2240 g1_write_barrier_pre(noreg, // obj
phh@9669 2241 noreg, // offset
phh@9669 2242 value, // pre_val
phh@9669 2243 tmp1, tmp2, needs_frame);
phh@9669 2244 bind(not_weak);
phh@9669 2245 }
phh@9669 2246 #endif // INCLUDE_ALL_GCS
phh@9669 2247 verify_oop(value);
phh@9669 2248 bind(done);
phh@9669 2249 }
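
// C-like sketch of the resolution above (illustrative):
//   if (handle == NULL) return NULL;
//   oop value = *(oop*)(handle & ~weak_tag_mask);      // strip tag, dereference
//   if (UseG1GC && (handle & weak_tag_mask) != 0)      // jweak: keep referent alive
//     g1_write_barrier_pre(/*pre_val=*/value);
//   return value;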
phh@9669 2250
goetz@6515 2251 #if INCLUDE_ALL_GCS
goetz@6458 2252 // General G1 pre-barrier generator.
goetz@6458 2253 // Goal: record the previous value if it is not null.
goetz@6458 2254 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
goetz@6458 2255 Register Rtmp1, Register Rtmp2, bool needs_frame) {
goetz@6458 2256 Label runtime, filtered;
goetz@6458 2257
goetz@6458 2258 // Is marking active?
goetz@6458 2259 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
goetz@6458 2260 lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
goetz@6458 2261 } else {
goetz@6458 2262 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
goetz@6458 2263 lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
goetz@6458 2264 }
goetz@6458 2265 cmpdi(CCR0, Rtmp1, 0);
goetz@6458 2266 beq(CCR0, filtered);
goetz@6458 2267
goetz@6458 2268 // Do we need to load the previous value?
goetz@6458 2269 if (Robj != noreg) {
goetz@6458 2270 // Load the previous value...
goetz@6458 2271 if (UseCompressedOops) {
goetz@6458 2272 lwz(Rpre_val, offset, Robj);
goetz@6458 2273 } else {
goetz@6458 2274 ld(Rpre_val, offset, Robj);
goetz@6458 2275 }
goetz@6458 2276 // Previous value has been loaded into Rpre_val.
goetz@6458 2277 }
goetz@6458 2278 assert(Rpre_val != noreg, "must have a real register");
goetz@6458 2279
goetz@6458 2280 // Is the previous value null?
goetz@6458 2281 cmpdi(CCR0, Rpre_val, 0);
goetz@6458 2282 beq(CCR0, filtered);
goetz@6458 2283
goetz@6458 2284 if (Robj != noreg && UseCompressedOops) {
goetz@6458 2285 decode_heap_oop_not_null(Rpre_val);
goetz@6458 2286 }
goetz@6458 2287
goetz@6458 2288 // Not filtered, so the previous value must be enqueued: try the
goetz@6458 2289 // thread-local SATB buffer first, and call the runtime if it is full.
goetz@6458 2292
goetz@6458 2293 // Can we store original value in the thread's buffer?
goetz@6458 2294 // Is index == 0?
goetz@6458 2295 // (The index field is typed as size_t.)
goetz@6458 2296 const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
goetz@6458 2297
goetz@6458 2298 ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458 2299 cmpdi(CCR0, Rindex, 0);
goetz@6458 2300 beq(CCR0, runtime); // If index == 0, goto runtime.
goetz@6458 2301 ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
goetz@6458 2302
goetz@6458 2303 addi(Rindex, Rindex, -wordSize); // Decrement index.
goetz@6458 2304 std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458 2305
goetz@6458 2306 // Record the previous value.
goetz@6458 2307 stdx(Rpre_val, Rbuffer, Rindex);
goetz@6458 2308 b(filtered);
goetz@6458 2309
goetz@6458 2310 bind(runtime);
goetz@6458 2311
phh@9669 2312 // May need to preserve LR. Also needed if current frame is not compatible with C calling convention.
goetz@6458 2313 if (needs_frame) {
goetz@6458 2314 save_LR_CR(Rtmp1);
goetz@6511 2315 push_frame_reg_args(0, Rtmp2);
goetz@6458 2316 }
goetz@6458 2317
goetz@6458 2318 if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
goetz@6458 2319 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
goetz@6458 2320 if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
goetz@6458 2321
goetz@6458 2322 if (needs_frame) {
goetz@6458 2323 pop_frame();
goetz@6458 2324 restore_LR_CR(Rtmp1);
goetz@6458 2325 }
goetz@6458 2326
goetz@6458 2327 bind(filtered);
goetz@6458 2328 }
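// A C-like sketch of the pre-barrier above (informal names, not actual VM
// accessors; illustrative only):
//
//   if (thread->satb_mark_queue()._active) {
//     oop pre_val = (Robj != NULL) ? *(oop*)(Robj + offset) : Rpre_val;
//     if (pre_val != NULL) {
//       if (queue._index != 0) {                       // room in local buffer?
//         queue._index -= wordSize;
//         *(oop*)(queue._buf + queue._index) = pre_val;
//       } else {
//         SharedRuntime::g1_wb_pre(pre_val, thread);   // runtime slow path
//       }
//     }
//   }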
goetz@6458 2329
goetz@6458 2330 // General G1 post-barrier generator
goetz@6458 2331 // Store cross-region card.
goetz@6458 2332 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
goetz@6458 2333 Label runtime, filtered_int;
goetz@6458 2334 Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
goetz@6458 2335 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
goetz@6458 2336
goetz@6458 2337 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
goetz@6458 2338 assert(bs->kind() == BarrierSet::G1SATBCT ||
goetz@6458 2339 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
goetz@6458 2340
goetz@6458 2341 // Does store cross heap regions?
goetz@6458 2342 if (G1RSBarrierRegionFilter) {
goetz@6458 2343 xorr(Rtmp1, Rstore_addr, Rnew_val);
goetz@6458 2344 srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
goetz@6458 2345 beq(CCR0, filtered);
goetz@6458 2346 }
goetz@6458 2347
goetz@6458 2348 // Crosses regions, storing NULL?
goetz@6458 2349 #ifdef ASSERT
goetz@6458 2350 cmpdi(CCR0, Rnew_val, 0);
goetz@6458 2351 asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
goetz@6458 2352 //beq(CCR0, filtered);
goetz@6458 2353 #endif
goetz@6458 2354
goetz@6458 2355 // Storing region crossing non-NULL, is card already dirty?
goetz@6458 2356 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
goetz@6458 2357 const Register Rcard_addr = Rtmp1;
goetz@6458 2358 Register Rbase = Rtmp2;
goetz@6458 2359 load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
goetz@6458 2360
goetz@6458 2361 srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
goetz@6458 2362
goetz@6458 2363   // Load the value of the card (at address Rbase + Rcard_addr).
goetz@6458 2364   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
goetz@6515 2365 cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
goetz@6515 2366 beq(CCR0, filtered);
goetz@6515 2367
goetz@6515 2368 membar(Assembler::StoreLoad);
goetz@6515 2369 lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
goetz@6515 2370 cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
goetz@6458 2371 beq(CCR0, filtered);
goetz@6458 2372
goetz@6458 2373 // Storing a region crossing, non-NULL oop, card is clean.
goetz@6458 2374 // Dirty card and log.
goetz@6515 2375 li(Rtmp3, CardTableModRefBS::dirty_card_val());
goetz@6458 2376 //release(); // G1: oops are allowed to get visible after dirty marking.
goetz@6458 2377 stbx(Rtmp3, Rbase, Rcard_addr);
goetz@6458 2378
goetz@6458 2379 add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
goetz@6458 2380 Rbase = noreg; // end of lifetime
goetz@6458 2381
goetz@6458 2382 const Register Rqueue_index = Rtmp2,
goetz@6458 2383 Rqueue_buf = Rtmp3;
goetz@6458 2384 ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458 2385 cmpdi(CCR0, Rqueue_index, 0);
goetz@6458 2386   beq(CCR0, runtime); // If index == 0, goto runtime.
goetz@6458 2387 ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
goetz@6458 2388
goetz@6458 2389 addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
goetz@6458 2390 std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
goetz@6458 2391
goetz@6458 2392 stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
goetz@6458 2393 b(filtered);
goetz@6458 2394
goetz@6458 2395 bind(runtime);
goetz@6458 2396
goetz@6458 2397   // No live values need saving; pass card address and thread to the runtime.
goetz@6458 2398 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
goetz@6458 2399
goetz@6458 2400 bind(filtered_int);
goetz@6458 2401 }
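// A C-like sketch of the post-barrier above (informal names; illustrative
// only):
//
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return; // same region
//   jbyte* card = byte_map_base + (store_addr >> card_shift);
//   if (*card == g1_young_card_val()) return;   // young region, no card needed
//   StoreLoad_membar();
//   if (*card == dirty_card_val()) return;      // already dirty
//   *card = dirty_card_val();
//   enqueue(card);  // thread-local dirty card queue; g1_wb_post() if full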
goetz@6515 2402 #endif // INCLUDE_ALL_GCS
goetz@6458 2403
goetz@6458 2404 // Values for last_Java_pc, and last_Java_sp must comply to the rules
goetz@7222 2405 // in frame_ppc.hpp.
goetz@6458 2406 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
goetz@6458 2407   // Always set last_Java_pc and flags first because once last_Java_sp
goetz@6458 2408   // is visible, has_last_Java_frame is true and users will look at the
goetz@6458 2409   // rest of the fields. (Note: flags should always be zero before we
goetz@6458 2410   // get here, so they don't need to be set.)
goetz@6458 2411
goetz@6458 2412 // Verify that last_Java_pc was zeroed on return to Java
goetz@6458 2413 asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
goetz@6458 2414 "last_Java_pc not zeroed before leaving Java", 0x200);
goetz@6458 2415
goetz@6458 2416   // When returning from calling out from Java mode the frame anchor's
goetz@6458 2417   // last_Java_pc will always be set to NULL. It is set here so that,
goetz@6458 2418   // if we are doing a call to native (not VM), we capture the
goetz@6458 2419   // known pc and don't have to rely on the native call having a
goetz@6458 2420   // standard frame linkage where we can find the pc.
goetz@6458 2421 if (last_Java_pc != noreg)
goetz@6458 2422 std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
goetz@6458 2423
goetz@6495 2424 // Set last_Java_sp last.
goetz@6458 2425 std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
goetz@6458 2426 }
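// The required publication order, as a C-like sketch (field names informal):
//
//   thread->_anchor._last_Java_pc = pc;  // store pc (and flags) first
//   thread->_anchor._last_Java_sp = sp;  // publishing sp makes the anchor valid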
goetz@6458 2427
goetz@6458 2428 void MacroAssembler::reset_last_Java_frame(void) {
goetz@6458 2429 asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
goetz@6458 2430 R16_thread, "SP was not set, still zero", 0x202);
goetz@6458 2431
goetz@6458 2432 BLOCK_COMMENT("reset_last_Java_frame {");
goetz@6458 2433 li(R0, 0);
goetz@6458 2434
goetz@6458 2435 // _last_Java_sp = 0
goetz@6458 2436 std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
goetz@6458 2437
goetz@6458 2438 // _last_Java_pc = 0
goetz@6458 2439 std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
goetz@6458 2440 BLOCK_COMMENT("} reset_last_Java_frame");
goetz@6458 2441 }
goetz@6458 2442
goetz@6458 2443 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
goetz@6458 2444 assert_different_registers(sp, tmp1);
goetz@6458 2445
goetz@6458 2446 // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
goetz@6458 2447 // TOP_IJAVA_FRAME_ABI.
goetz@6458 2448 // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
goetz@6458 2449 #ifdef CC_INTERP
goetz@6458 2450 ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
goetz@6458 2451 #else
goetz@6512 2452 address entry = pc();
goetz@6512 2453 load_const_optimized(tmp1, entry);
goetz@6458 2454 #endif
goetz@6458 2455
goetz@6458 2456 set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
goetz@6458 2457 }
goetz@6458 2458
goetz@6458 2459 void MacroAssembler::get_vm_result(Register oop_result) {
goetz@6458 2460 // Read:
goetz@6458 2461 // R16_thread
goetz@6458 2462 // R16_thread->in_bytes(JavaThread::vm_result_offset())
goetz@6458 2463 //
goetz@6458 2464 // Updated:
goetz@6458 2465 // oop_result
goetz@6458 2466 // R16_thread->in_bytes(JavaThread::vm_result_offset())
goetz@6458 2467
goetz@6458 2468 ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
goetz@6458 2469 li(R0, 0);
goetz@6458 2470 std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
goetz@6458 2471
goetz@6458 2472 verify_oop(oop_result);
goetz@6458 2473 }
goetz@6458 2474
goetz@6458 2475 void MacroAssembler::get_vm_result_2(Register metadata_result) {
goetz@6458 2476 // Read:
goetz@6458 2477 // R16_thread
goetz@6458 2478 // R16_thread->in_bytes(JavaThread::vm_result_2_offset())
goetz@6458 2479 //
goetz@6458 2480 // Updated:
goetz@6458 2481 // metadata_result
goetz@6458 2482 // R16_thread->in_bytes(JavaThread::vm_result_2_offset())
goetz@6458 2483
goetz@6458 2484 ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
goetz@6458 2485 li(R0, 0);
goetz@6458 2486 std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
goetz@6458 2487 }
goetz@6458 2488
goetz@6458 2489
goetz@6458 2490 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
goetz@6501 2491 Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
goetz@6477 2492 if (Universe::narrow_klass_base() != 0) {
goetz@6515 2493 // Use dst as temp if it is free.
goetz@6515 2494 load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
goetz@6501 2495 sub(dst, current, R0);
goetz@6501 2496 current = dst;
goetz@6477 2497 }
goetz@6501 2498 if (Universe::narrow_klass_shift() != 0) {
goetz@6501 2499 srdi(dst, current, Universe::narrow_klass_shift());
goetz@6501 2500 current = dst;
goetz@6458 2501 }
goetz@6501 2502 mr_if_needed(dst, current); // Move may be required.
goetz@6458 2503 }
goetz@6458 2504
goetz@6458 2505 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
goetz@6474 2506 if (UseCompressedClassPointers) {
goetz@6458 2507 encode_klass_not_null(ck, klass);
goetz@6458 2508 stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
goetz@6458 2509 } else {
goetz@6458 2510 std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
goetz@6458 2511 }
goetz@6458 2512 }
goetz@6458 2513
goetz@6512 2514 void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
goetz@6512 2515 if (UseCompressedClassPointers) {
goetz@6512 2516 if (val == noreg) {
goetz@6512 2517 val = R0;
goetz@6512 2518 li(val, 0);
goetz@6512 2519 }
goetz@6512 2520 stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
goetz@6512 2521 }
goetz@6512 2522 }
goetz@6512 2523
goetz@6477 2524 int MacroAssembler::instr_size_for_decode_klass_not_null() {
goetz@6477 2525 if (!UseCompressedClassPointers) return 0;
goetz@6477 2526 int num_instrs = 1; // shift or move
goetz@6477 2527 if (Universe::narrow_klass_base() != 0) num_instrs = 7; // shift + load const + add
goetz@6477 2528 return num_instrs * BytesPerInstWord;
goetz@6477 2529 }
goetz@6477 2530
goetz@6458 2531 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
goetz@7222 2532 assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
goetz@6458 2533 if (src == noreg) src = dst;
goetz@6463 2534 Register shifted_src = src;
goetz@6477 2535   if (Universe::narrow_klass_shift() != 0 ||
goetz@6477 2536       (Universe::narrow_klass_base() == 0 && src != dst)) { // Move required.
goetz@6463 2537 shifted_src = dst;
goetz@6463 2538 sldi(shifted_src, src, Universe::narrow_klass_shift());
goetz@6458 2539 }
goetz@6477 2540 if (Universe::narrow_klass_base() != 0) {
goetz@6477 2541 load_const(R0, Universe::narrow_klass_base());
goetz@6477 2542 add(dst, shifted_src, R0);
goetz@6477 2543 }
goetz@6458 2544 }
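// Both directions in C-like form (a sketch, assuming the usual narrow-klass
// scheme with base = Universe::narrow_klass_base() and
// shift = Universe::narrow_klass_shift()):
//
//   narrowKlass encode(Klass* k)      { return (narrowKlass)(((uintptr_t)k - base) >> shift); }
//   Klass*      decode(narrowKlass n) { return (Klass*)(((uintptr_t)n << shift) + base); }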
goetz@6458 2545
goetz@6458 2546 void MacroAssembler::load_klass(Register dst, Register src) {
goetz@6474 2547 if (UseCompressedClassPointers) {
goetz@6458 2548 lwz(dst, oopDesc::klass_offset_in_bytes(), src);
goetz@6458 2549 // Attention: no null check here!
goetz@6458 2550 decode_klass_not_null(dst, dst);
goetz@6458 2551 } else {
goetz@6458 2552 ld(dst, oopDesc::klass_offset_in_bytes(), src);
goetz@6458 2553 }
goetz@6458 2554 }
goetz@6458 2555
goetz@6458 2556 void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
goetz@6486 2557 if (!os::zero_page_read_protected()) {
goetz@6458 2558 if (TrapBasedNullChecks) {
goetz@6458 2559 trap_null_check(src);
goetz@6458 2560 }
goetz@6458 2561 }
goetz@6458 2562 load_klass(dst, src);
goetz@6458 2563 }
goetz@6458 2564
goetz@6458 2565 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
goetz@6463 2566 if (Universe::heap() != NULL) {
goetz@7222 2567 load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
goetz@6463 2568 } else {
goetz@7222 2569 // Heap not yet allocated. Load indirectly.
goetz@7222 2570 int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
goetz@7222 2571 ld(R30, simm16_offset, R30);
goetz@6458 2572 }
goetz@6458 2573 }
goetz@6458 2574
goetz@6495 2575 // Clear Array
goetz@6495 2576 // Kills both input registers. tmp == R0 is allowed.
goetz@6495 2577 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
goetz@6495 2578 // Procedure for large arrays (uses data cache block zero instruction).
goetz@6495 2579 Label startloop, fast, fastloop, small_rest, restloop, done;
goetz@6495 2580 const int cl_size = VM_Version::get_cache_line_size(),
goetz@6495 2581 cl_dwords = cl_size>>3,
goetz@6495 2582 cl_dw_addr_bits = exact_log2(cl_dwords),
goetz@6495 2583 dcbz_min = 1; // Min count of dcbz executions, needs to be >0.
goetz@6495 2584
goetz@6495 2585 //2:
goetz@6495 2586 cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
goetz@6495 2587 blt(CCR1, small_rest); // Too small.
goetz@6495 2588 rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
goetz@6495 2589 beq(CCR0, fast); // Already 128byte aligned.
goetz@6495 2590
goetz@6495 2591 subfic(tmp, tmp, cl_dwords);
goetz@6495 2592 mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
goetz@6495 2593 subf(cnt_dwords, tmp, cnt_dwords); // rest.
goetz@6495 2594 li(tmp, 0);
goetz@6495 2595 //10:
goetz@6495 2596 bind(startloop); // Clear at the beginning to reach 128byte boundary.
goetz@6495 2597 std(tmp, 0, base_ptr); // Clear 8byte aligned block.
goetz@6495 2598 addi(base_ptr, base_ptr, 8);
goetz@6495 2599 bdnz(startloop);
goetz@6495 2600 //13:
goetz@6495 2601 bind(fast); // Clear 128byte blocks.
goetz@6495 2602 srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0).
goetz@6495 2603 andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
goetz@6495 2604 mtctr(tmp); // Load counter.
goetz@6495 2605 //16:
goetz@6495 2606 bind(fastloop);
goetz@6495 2607 dcbz(base_ptr); // Clear 128byte aligned block.
goetz@6495 2608 addi(base_ptr, base_ptr, cl_size);
goetz@6495 2609 bdnz(fastloop);
goetz@6495 2610 if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
goetz@6495 2611 //20:
goetz@6495 2612 bind(small_rest);
goetz@6495 2613 cmpdi(CCR0, cnt_dwords, 0); // size 0?
goetz@6495 2614 beq(CCR0, done); // rest == 0
goetz@6495 2615 li(tmp, 0);
goetz@6495 2616 mtctr(cnt_dwords); // Load counter.
goetz@6495 2617 //24:
goetz@6495 2618 bind(restloop); // Clear rest.
goetz@6495 2619 std(tmp, 0, base_ptr); // Clear 8byte aligned block.
goetz@6495 2620 addi(base_ptr, base_ptr, 8);
goetz@6495 2621 bdnz(restloop);
goetz@6495 2622 //27:
goetz@6495 2623 bind(done);
goetz@6495 2624 }
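// Outline of the clearing strategy in C-like form (illustrative only;
// p is a doubleword pointer, cnt counts doublewords):
//
//   while (cnt > 0 && !is_aligned(p, cl_size)) { *p++ = 0; cnt--; }         // startloop
//   while (cnt >= cl_dwords) { dcbz(p); p += cl_dwords; cnt -= cl_dwords; } // fastloop
//   while (cnt-- > 0) *p++ = 0;                                             // restloop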
goetz@6495 2625
goetz@6458 2626 /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
goetz@6458 2627
goetz@6458 2628 // Search for a single jchar in a jchar[].
goetz@6458 2629 //
goetz@6458 2630 // Assumes that result differs from all other registers.
goetz@6458 2631 //
goetz@6458 2632 // Haystack, needle are the addresses of jchar-arrays.
goetz@6458 2633 // NeedleChar is needle[0] if it is known at compile time.
goetz@6458 2634 // Haycnt is the length of the haystack. We assume haycnt >=1.
goetz@6458 2635 //
goetz@6458 2636 // Preserves haystack, haycnt, kills all other registers.
goetz@6458 2637 //
goetz@6458 2638 // If needle == R0, we search for the constant needleChar.
goetz@6458 2639 void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
goetz@6458 2640 Register needle, jchar needleChar,
goetz@6458 2641 Register tmp1, Register tmp2) {
goetz@6458 2642
goetz@6458 2643 assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
goetz@6458 2644
goetz@6458 2645 Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
goetz@6458 2646 Register needle0 = needle, // Contains needle[0].
goetz@6458 2647 addr = tmp1,
goetz@6458 2648 ch1 = tmp2,
goetz@6458 2649 ch2 = R0;
goetz@6458 2650
goetz@6458 2651 //2 (variable) or 3 (const):
goetz@6458 2652 if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
goetz@6458 2653 dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
goetz@6458 2654
goetz@6458 2655 srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
goetz@6458 2656 mr(addr, haystack);
goetz@6458 2657 beq(CCR0, L_FinalCheck);
goetz@6458 2658 mtctr(tmp2); // Move to count register.
goetz@6458 2659 //8:
goetz@6458 2660    bind(L_InnerLoop);             // Main workhorse (2x unrolled search loop).
goetz@6458 2661 lhz(ch1, 0, addr); // Load characters from haystack.
goetz@6458 2662 lhz(ch2, 2, addr);
goetz@6458 2663 (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
goetz@6458 2664 (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
goetz@6458 2665 beq(CCR0, L_Found1); // Did we find the needle?
goetz@6458 2666 beq(CCR1, L_Found2);
goetz@6458 2667 addi(addr, addr, 4);
goetz@6458 2668 bdnz(L_InnerLoop);
goetz@6458 2669 //16:
goetz@6458 2670 bind(L_FinalCheck);
goetz@6458 2671 andi_(R0, haycnt, 1);
goetz@6458 2672 beq(CCR0, L_NotFound);
goetz@6458 2673 lhz(ch1, 0, addr); // One position left at which we have to compare.
goetz@6458 2674 (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
goetz@6458 2675 beq(CCR1, L_Found3);
goetz@6458 2676 //21:
goetz@6458 2677 bind(L_NotFound);
goetz@6458 2678 li(result, -1); // Not found.
goetz@6458 2679 b(L_End);
goetz@6458 2680
goetz@6458 2681 bind(L_Found2);
goetz@6458 2682 addi(addr, addr, 2);
goetz@6458 2683 //24:
goetz@6458 2684 bind(L_Found1);
goetz@6458 2685 bind(L_Found3); // Return index ...
goetz@6458 2686 subf(addr, haystack, addr); // relative to haystack,
goetz@6458 2687 srdi(result, addr, 1); // in characters.
goetz@6458 2688 bind(L_End);
goetz@6458 2689 }
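// Functional equivalent of the unrolled loop above (illustrative):
//
//   int indexof_1(const jchar* haystack, int haycnt, jchar ch) {
//     for (int i = 0; i < haycnt; i++) {
//       if (haystack[i] == ch) return i;
//     }
//     return -1;
//   }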
goetz@6458 2690
goetz@6458 2691
goetz@6458 2692 // Implementation of IndexOf for jchar arrays.
goetz@6458 2693 //
goetz@6458 2694 // The lengths of haystack and needle are not constant, i.e. passed in registers.
goetz@6458 2695 //
goetz@6458 2696 // Preserves registers haystack, needle.
goetz@6458 2697 // Kills registers haycnt, needlecnt.
goetz@6458 2698 // Assumes that result differs from all other registers.
goetz@6458 2699 // Haystack, needle are the addresses of jchar-arrays.
goetz@6458 2700 // Haycnt, needlecnt are the lengths of them, respectively.
goetz@6458 2701 //
goetz@6458 2702 // Needlecntval must be zero or a 15-bit unsigned immediate > 1.
goetz@6458 2703 void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
goetz@6458 2704 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
goetz@6458 2705 Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
goetz@6458 2706
goetz@6458 2707 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
goetz@6458 2708 Label L_TooShort, L_Found, L_NotFound, L_End;
goetz@6458 2709 Register last_addr = haycnt, // Kill haycnt at the beginning.
goetz@6458 2710 addr = tmp1,
goetz@6458 2711 n_start = tmp2,
goetz@6458 2712 ch1 = tmp3,
goetz@6458 2713 ch2 = R0;
goetz@6458 2714
goetz@6458 2715 // **************************************************************************************************
goetz@6458 2716 // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
goetz@6458 2717 // **************************************************************************************************
goetz@6458 2718
goetz@6458 2719 //1 (variable) or 3 (const):
goetz@6458 2720 dcbtct(needle, 0x00); // Indicate R/O access to str1.
goetz@6458 2721 dcbtct(haystack, 0x00); // Indicate R/O access to str2.
goetz@6458 2722
goetz@6458 2723 // Compute last haystack addr to use if no match gets found.
goetz@6458 2724 if (needlecntval == 0) { // variable needlecnt
goetz@6458 2725 //3:
goetz@6458 2726 subf(ch1, needlecnt, haycnt); // Last character index to compare is haycnt-needlecnt.
goetz@6458 2727 addi(addr, haystack, -2); // Accesses use pre-increment.
goetz@6458 2728 cmpwi(CCR6, needlecnt, 2);
goetz@6458 2729 blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
goetz@6458 2730 slwi(ch1, ch1, 1); // Scale to number of bytes.
goetz@6458 2731 lwz(n_start, 0, needle); // Load first 2 characters of needle.
goetz@6458 2732 add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
goetz@6458 2733 addi(needlecnt, needlecnt, -2); // Rest of needle.
goetz@6458 2734 } else { // constant needlecnt
goetz@6458 2735 guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
goetz@6458 2736 assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
goetz@6458 2737 //5:
goetz@6458 2738 addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
goetz@6458 2739 lwz(n_start, 0, needle); // Load first 2 characters of needle.
goetz@6458 2740 addi(addr, haystack, -2); // Accesses use pre-increment.
goetz@6458 2741 slwi(ch1, ch1, 1); // Scale to number of bytes.
goetz@6458 2742 add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
goetz@6458 2743 li(needlecnt, needlecntval-2); // Rest of needle.
goetz@6458 2744 }
goetz@6458 2745
goetz@6458 2746 // Main Loop (now we have at least 3 characters).
goetz@6458 2747 //11:
goetz@6458 2748 Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
goetz@6458 2749 bind(L_OuterLoop); // Search for 1st 2 characters.
goetz@6458 2750 Register addr_diff = tmp4;
goetz@6458 2751 subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
goetz@6458 2752 addi(addr, addr, 2); // This is the new address we want to use for comparing.
goetz@6458 2753 srdi_(ch2, addr_diff, 2);
goetz@6458 2754 beq(CCR0, L_FinalCheck); // 2 characters left?
goetz@6458 2755 mtctr(ch2); // addr_diff/4
goetz@6458 2756 //16:
goetz@6458 2757    bind(L_InnerLoop);             // Main workhorse (2x unrolled search loop)
goetz@6458 2758 lwz(ch1, 0, addr); // Load 2 characters of haystack (ignore alignment).
goetz@6458 2759 lwz(ch2, 2, addr);
goetz@6458 2760 cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
goetz@6458 2761 cmpw(CCR1, ch2, n_start);
goetz@6458 2762 beq(CCR0, L_Comp1); // Did we find the needle start?
goetz@6458 2763 beq(CCR1, L_Comp2);
goetz@6458 2764 addi(addr, addr, 4);
goetz@6458 2765 bdnz(L_InnerLoop);
goetz@6458 2766 //24:
goetz@6458 2767 bind(L_FinalCheck);
goetz@6458 2768 rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
goetz@6458 2769 beq(CCR0, L_NotFound);
goetz@6458 2770 lwz(ch1, 0, addr); // One position left at which we have to compare.
goetz@6458 2771 cmpw(CCR1, ch1, n_start);
goetz@6458 2772 beq(CCR1, L_Comp3);
goetz@6458 2773 //29:
goetz@6458 2774 bind(L_NotFound);
goetz@6458 2775 li(result, -1); // not found
goetz@6458 2776 b(L_End);
goetz@6458 2777
goetz@6458 2778
goetz@6458 2779 // **************************************************************************************************
goetz@6458 2780 // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
goetz@6458 2781 // **************************************************************************************************
goetz@6458 2782 //31:
goetz@6458 2783   if ((needlecntval >> 1) != 1) { // Const needlecnt is 2 or 3? Reduce code size.
goetz@6458 2784   int nopcnt = 5;
goetz@6458 2785   if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
goetz@6458 2786 if (needlecntval == 0) { // We have to handle these cases separately.
goetz@6458 2787 Label L_OneCharLoop;
goetz@6458 2788 bind(L_TooShort);
goetz@6458 2789 mtctr(haycnt);
goetz@6458 2790 lhz(n_start, 0, needle); // First character of needle
goetz@6458 2791 bind(L_OneCharLoop);
goetz@6458 2792 lhzu(ch1, 2, addr);
goetz@6458 2793 cmpw(CCR1, ch1, n_start);
goetz@6458 2794 beq(CCR1, L_Found); // Did we find the one character needle?
goetz@6458 2795 bdnz(L_OneCharLoop);
goetz@6458 2796 li(result, -1); // Not found.
goetz@6458 2797 b(L_End);
goetz@6458 2798 } // 8 instructions, so no impact on alignment.
goetz@6458 2799 for (int x = 0; x < nopcnt; ++x) nop();
goetz@6458 2800 }
goetz@6458 2801
goetz@6458 2802 // **************************************************************************************************
goetz@6458 2803 // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
goetz@6458 2804 // **************************************************************************************************
goetz@6458 2805
goetz@6458 2806 // Compare the rest
goetz@6458 2807 //36 if needlecntval==0, else 37:
goetz@6458 2808 bind(L_Comp2);
goetz@6458 2809 addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
goetz@6458 2810 bind(L_Comp1); // Addr points to possible needle start.
goetz@6458 2811    bind(L_Comp3);                 // Could have created a copy and used a different return address, but we save code size here.
goetz@6458 2812 if (needlecntval != 2) { // Const needlecnt==2?
goetz@6458 2813 if (needlecntval != 3) {
goetz@6458 2814 if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
goetz@6458 2815 Register ind_reg = tmp4;
goetz@6458 2816 li(ind_reg, 2*2); // First 2 characters are already compared, use index 2.
goetz@6458 2817 mtctr(needlecnt); // Decremented by 2, still > 0.
goetz@6458 2818 //40:
goetz@6458 2819 Label L_CompLoop;
goetz@6458 2820 bind(L_CompLoop);
goetz@6458 2821 lhzx(ch2, needle, ind_reg);
goetz@6458 2822 lhzx(ch1, addr, ind_reg);
goetz@6458 2823 cmpw(CCR1, ch1, ch2);
goetz@6458 2824 bne(CCR1, L_OuterLoop);
goetz@6458 2825 addi(ind_reg, ind_reg, 2);
goetz@6458 2826 bdnz(L_CompLoop);
goetz@6458 2827 } else { // No loop required if there's only one needle character left.
goetz@6458 2828 lhz(ch2, 2*2, needle);
goetz@6458 2829 lhz(ch1, 2*2, addr);
goetz@6458 2830 cmpw(CCR1, ch1, ch2);
goetz@6458 2831 bne(CCR1, L_OuterLoop);
goetz@6458 2832 }
goetz@6458 2833 }
goetz@6458 2834 // Return index ...
goetz@6458 2835 //46:
goetz@6458 2836 bind(L_Found);
goetz@6458 2837 subf(addr, haystack, addr); // relative to haystack, ...
goetz@6458 2838 srdi(result, addr, 1); // in characters.
goetz@6458 2839 //48:
goetz@6458 2840 bind(L_End);
goetz@6458 2841 }
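// The code above is an unrolled, branch-reduced version of this reference
// loop (illustrative; needlecnt >= 2, the one-char case is handled by the
// special-case loop):
//
//   int indexof(const jchar* hay, int haycnt, const jchar* ndl, int needlecnt) {
//     for (int i = 0; i + needlecnt <= haycnt; i++) {
//       if (hay[i] == ndl[0] && hay[i+1] == ndl[1]) {       // match first 2 chars
//         int j = 2;
//         while (j < needlecnt && hay[i+j] == ndl[j]) j++;  // compare the rest
//         if (j == needlecnt) return i;
//       }
//     }
//     return -1;
//   }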
goetz@6458 2842
goetz@6458 2843 // Implementation of Compare for jchar arrays.
goetz@6458 2844 //
goetz@6458 2845 // Kills the registers str1, str2, cnt1, cnt2.
goetz@6458 2846 // Kills cr0, ctr.
goetz@6458 2847 // Assumes that result differs from the input registers.
goetz@6458 2848 void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
goetz@6458 2849 Register result_reg, Register tmp_reg) {
goetz@6458 2850 assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
goetz@6458 2851
goetz@6458 2852 Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
goetz@6458 2853 Register cnt_diff = R0,
goetz@6458 2854 limit_reg = cnt1_reg,
goetz@6458 2855 chr1_reg = result_reg,
goetz@6458 2856 chr2_reg = cnt2_reg,
goetz@6458 2857 addr_diff = str2_reg;
goetz@6458 2858
goetz@6458 2859 // Offset 0 should be 32 byte aligned.
goetz@6458 2860 //-4:
goetz@6458 2861 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
goetz@6458 2862 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
goetz@6458 2863 //-2:
goetz@6458 2864 // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
goetz@6458 2865 subf(result_reg, cnt2_reg, cnt1_reg); // difference between cnt1/2
goetz@6458 2866 subf_(addr_diff, str1_reg, str2_reg); // alias?
goetz@6458 2867 beq(CCR0, Ldone); // return cnt difference if both ones are identical
goetz@6458 2868 srawi(limit_reg, result_reg, 31); // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
goetz@6458 2869 mr(cnt_diff, result_reg);
goetz@6458 2870 andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
goetz@6458 2871 add_(limit_reg, cnt2_reg, limit_reg); // min(cnt1, cnt2)==0?
goetz@6458 2872 beq(CCR0, Ldone); // return cnt difference if one has 0 length
goetz@6458 2873
goetz@6458 2874 lhz(chr1_reg, 0, str1_reg); // optional: early out if first characters mismatch
goetz@6458 2875 lhzx(chr2_reg, str1_reg, addr_diff); // optional: early out if first characters mismatch
goetz@6458 2876 addi(tmp_reg, limit_reg, -1); // min(cnt1, cnt2)-1
goetz@6458 2877 subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
goetz@6458 2878 bne(CCR0, Ldone); // optional: early out if first characters mismatch
goetz@6458 2879
goetz@6458 2880 // Set loop counter by scaling down tmp_reg
goetz@6458 2881 srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
goetz@6458 2882 ble(CCR0, Lslow_case); // need >4 characters for fast loop
goetz@6458 2883 andi(limit_reg, tmp_reg, 4-1); // remaining characters
goetz@6458 2884
goetz@6458 2885 // Adapt str1_reg str2_reg for the first loop iteration
goetz@6458 2886 mtctr(chr2_reg); // (min(cnt1, cnt2)-1)/4
goetz@6458 2887 addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
goetz@6458 2888 //16:
goetz@6458 2889 // Compare the rest of the characters
goetz@6458 2890 bind(Lfast_loop);
goetz@6458 2891 ld(chr1_reg, 0, str1_reg);
goetz@6458 2892 ldx(chr2_reg, str1_reg, addr_diff);
goetz@6458 2893 cmpd(CCR0, chr2_reg, chr1_reg);
goetz@6458 2894 bne(CCR0, Lslow_case); // return chr1_reg
goetz@6458 2895 addi(str1_reg, str1_reg, 4*2);
goetz@6458 2896 bdnz(Lfast_loop);
goetz@6458 2897 addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
goetz@6458 2898 //23:
goetz@6458 2899 bind(Lslow_case);
goetz@6458 2900 mtctr(limit_reg);
goetz@6458 2901 //24:
goetz@6458 2902 bind(Lslow_loop);
goetz@6458 2903 lhz(chr1_reg, 0, str1_reg);
goetz@6458 2904 lhzx(chr2_reg, str1_reg, addr_diff);
goetz@6458 2905 subf_(result_reg, chr2_reg, chr1_reg);
goetz@6458 2906 bne(CCR0, Ldone); // return chr1_reg
goetz@6458 2907 addi(str1_reg, str1_reg, 1*2);
goetz@6458 2908 bdnz(Lslow_loop);
goetz@6458 2909 //30:
goetz@6458 2910 // If strings are equal up to min length, return the length difference.
goetz@6458 2911 mr(result_reg, cnt_diff);
goetz@6458 2912 nop(); // alignment
goetz@6458 2913 //32:
goetz@6458 2914 // Otherwise, return the difference between the first mismatched chars.
goetz@6458 2915 bind(Ldone);
goetz@6458 2916 }
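// Semantics in C-like form (illustrative):
//
//   int string_compare(const jchar* s1, int cnt1, const jchar* s2, int cnt2) {
//     int min = (cnt1 < cnt2) ? cnt1 : cnt2;
//     for (int i = 0; i < min; i++) {
//       if (s1[i] != s2[i]) return (int)s1[i] - (int)s2[i];  // first mismatch
//     }
//     return cnt1 - cnt2;  // equal up to min length
//   }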
goetz@6458 2917
goetz@6458 2918
goetz@6458 2919 // Compare char[] arrays.
goetz@6458 2920 //
goetz@6458 2921 // str1_reg USE only
goetz@6458 2922 // str2_reg USE only
goetz@6458 2923 // cnt_reg USE_DEF, due to tmp reg shortage
goetz@6458 2924 // result_reg DEF only, might compromise USE only registers
goetz@6458 2925 void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
goetz@6458 2926 Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
goetz@6458 2927 Register tmp5_reg) {
goetz@6458 2928
goetz@6458 2929 // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
goetz@6458 2930 assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
goetz@6458 2931 assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
goetz@6458 2932
goetz@6458 2933 // Offset 0 should be 32 byte aligned.
goetz@6458 2934 Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
goetz@6458 2935 Register index_reg = tmp5_reg;
goetz@6458 2936 Register cbc_iter = tmp4_reg;
goetz@6458 2937
goetz@6458 2938 //-1:
goetz@6458 2939 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
goetz@6458 2940 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
goetz@6458 2941 //1:
goetz@6458 2942 andi(cbc_iter, cnt_reg, 4-1); // Remaining iterations after 4 java characters per iteration loop.
goetz@6458 2943 li(index_reg, 0); // init
goetz@6458 2944 li(result_reg, 0); // assume false
goetz@6458 2945 srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
goetz@6458 2946
goetz@6458 2947 cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0)
goetz@6458 2948 beq(CCR0, Linit_cbc); // too short
goetz@6458 2949 mtctr(tmp2_reg);
goetz@6458 2950 //8:
goetz@6458 2951 bind(Lloop);
goetz@6458 2952 ldx(tmp1_reg, str1_reg, index_reg);
goetz@6458 2953 ldx(tmp2_reg, str2_reg, index_reg);
goetz@6458 2954 cmpd(CCR0, tmp1_reg, tmp2_reg);
goetz@6458 2955 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
goetz@6458 2956 addi(index_reg, index_reg, 4*sizeof(jchar));
goetz@6458 2957 bdnz(Lloop);
goetz@6458 2958 //14:
goetz@6458 2959 bind(Linit_cbc);
goetz@6458 2960 beq(CCR1, Ldone_true);
goetz@6458 2961 mtctr(cbc_iter);
goetz@6458 2962 //16:
goetz@6458 2963 bind(Lcbc);
goetz@6458 2964 lhzx(tmp1_reg, str1_reg, index_reg);
goetz@6458 2965 lhzx(tmp2_reg, str2_reg, index_reg);
goetz@6458 2966 cmpw(CCR0, tmp1_reg, tmp2_reg);
goetz@6458 2967 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
goetz@6458 2968 addi(index_reg, index_reg, 1*sizeof(jchar));
goetz@6458 2969 bdnz(Lcbc);
goetz@6458 2970 nop();
goetz@6458 2971 bind(Ldone_true);
goetz@6458 2972 li(result_reg, 1);
goetz@6458 2973 //24:
goetz@6458 2974 bind(Ldone_false);
goetz@6458 2975 }
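// Reference semantics (illustrative); the code above does the bulk with
// 8-byte compares and the remainder with 2-byte compares:
//
//   bool equals(const jchar* a, const jchar* b, int cnt) {
//     for (int i = 0; i < cnt; i++) {
//       if (a[i] != b[i]) return false;
//     }
//     return true;
//   }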
goetz@6458 2976
goetz@6458 2977
goetz@6458 2978 void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
goetz@6458 2979 Register tmp1_reg, Register tmp2_reg) {
goetz@6458 2980 // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
goetz@6458 2981 assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
goetz@6458 2982 assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
goetz@6458 2983 assert(sizeof(jchar) == 2, "must be");
goetz@6458 2984 assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
goetz@6458 2985
goetz@6458 2986 Label Ldone_false;
goetz@6458 2987
goetz@6458 2988 if (cntval < 16) { // short case
goetz@6458 2989 if (cntval != 0) li(result_reg, 0); // assume false
goetz@6458 2990
goetz@6458 2991 const int num_bytes = cntval*sizeof(jchar);
goetz@6458 2992 int index = 0;
goetz@6458 2993 for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
goetz@6458 2994 ld(tmp1_reg, index, str1_reg);
goetz@6458 2995 ld(tmp2_reg, index, str2_reg);
goetz@6458 2996 cmpd(CCR0, tmp1_reg, tmp2_reg);
goetz@6458 2997 bne(CCR0, Ldone_false);
goetz@6458 2998 }
goetz@6458 2999 if (cntval & 2) {
goetz@6458 3000 lwz(tmp1_reg, index, str1_reg);
goetz@6458 3001 lwz(tmp2_reg, index, str2_reg);
goetz@6458 3002 cmpw(CCR0, tmp1_reg, tmp2_reg);
goetz@6458 3003 bne(CCR0, Ldone_false);
goetz@6458 3004 index += 4;
goetz@6458 3005 }
goetz@6458 3006 if (cntval & 1) {
goetz@6458 3007 lhz(tmp1_reg, index, str1_reg);
goetz@6458 3008 lhz(tmp2_reg, index, str2_reg);
goetz@6458 3009 cmpw(CCR0, tmp1_reg, tmp2_reg);
goetz@6458 3010 bne(CCR0, Ldone_false);
goetz@6458 3011 }
goetz@6458 3012 // fallthrough: true
goetz@6458 3013 } else {
goetz@6458 3014 Label Lloop;
goetz@6458 3015 Register index_reg = tmp1_reg;
goetz@6458 3016 const int loopcnt = cntval/4;
goetz@6458 3017 assert(loopcnt > 0, "must be");
goetz@6458 3018 // Offset 0 should be 32 byte aligned.
goetz@6458 3019 //2:
goetz@6458 3020 dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
goetz@6458 3021 dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
goetz@6458 3022 li(tmp2_reg, loopcnt);
goetz@6458 3023 li(index_reg, 0); // init
goetz@6458 3024 li(result_reg, 0); // assume false
goetz@6458 3025 mtctr(tmp2_reg);
goetz@6458 3026 //8:
goetz@6458 3027 bind(Lloop);
goetz@6458 3028 ldx(R0, str1_reg, index_reg);
goetz@6458 3029 ldx(tmp2_reg, str2_reg, index_reg);
goetz@6458 3030 cmpd(CCR0, R0, tmp2_reg);
goetz@6458 3031 bne(CCR0, Ldone_false); // Unequal char pair found -> done.
goetz@6458 3032 addi(index_reg, index_reg, 4*sizeof(jchar));
goetz@6458 3033 bdnz(Lloop);
goetz@6458 3034 //14:
goetz@6458 3035 if (cntval & 2) {
goetz@6458 3036 lwzx(R0, str1_reg, index_reg);
goetz@6458 3037 lwzx(tmp2_reg, str2_reg, index_reg);
goetz@6458 3038 cmpw(CCR0, R0, tmp2_reg);
goetz@6458 3039 bne(CCR0, Ldone_false);
goetz@6458 3040 if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
goetz@6458 3041 }
goetz@6458 3042 if (cntval & 1) {
goetz@6458 3043 lhzx(R0, str1_reg, index_reg);
goetz@6458 3044 lhzx(tmp2_reg, str2_reg, index_reg);
goetz@6458 3045 cmpw(CCR0, R0, tmp2_reg);
goetz@6458 3046 bne(CCR0, Ldone_false);
goetz@6458 3047 }
goetz@6458 3048     // fallthrough: true
goetz@6458 3049 }
goetz@6458 3050 li(result_reg, 1);
goetz@6458 3051 bind(Ldone_false);
goetz@6458 3052 }
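// With a compile-time count, the short case above unrolls into straight-line
// loads; e.g. cntval == 3 (6 bytes) becomes, roughly (illustrative):
//
//   if (*(juint*)(str1 + 0) != *(juint*)(str2 + 0)) goto done_false;  // chars 0..1
//   if (*(jchar*)(str1 + 4) != *(jchar*)(str2 + 4)) goto done_false;  // char 2
//   result = 1;  // equal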
goetz@6458 3053
gromero@9496 3054 // Helpers for Intrinsic Emitters
gromero@9496 3055 //
gromero@9496 3056 // Revert the byte order of a 32bit value in a register
gromero@9496 3057 // src: 0x44556677
gromero@9496 3058 // dst: 0x77665544
gromero@9496 3059 // Three steps to obtain the result:
gromero@9496 3060 // 1) Rotate src (as doubleword) left 5 bytes. That puts the leftmost byte of the src word
gromero@9496 3061 // into the rightmost byte position. Afterwards, everything left of the rightmost byte is cleared.
gromero@9496 3062 // This value initializes dst.
gromero@9496 3063 // 2) Rotate src (as word) left 3 bytes. That puts the rightmost byte of the src word into the leftmost
gromero@9496 3064 // byte position. Furthermore, byte 5 is rotated into byte 6 position where it is supposed to go.
gromero@9496 3065 // This value is mask inserted into dst with a [0..23] mask of 1s.
gromero@9496 3066 // 3) Rotate src (as word) left 1 byte. That puts byte 6 into byte 5 position.
gromero@9496 3067 // This value is mask inserted into dst with a [8..15] mask of 1s.
gromero@9496 3068 void MacroAssembler::load_reverse_32(Register dst, Register src) {
gromero@9496 3069 assert_different_registers(dst, src);
gromero@9496 3070
gromero@9496 3071 rldicl(dst, src, (4+1)*8, 56); // Rotate byte 4 into position 7 (rightmost), clear all to the left.
gromero@9496 3072 rlwimi(dst, src, 3*8, 0, 23); // Insert byte 5 into position 6, 7 into 4, leave pos 7 alone.
gromero@9496 3073 rlwimi(dst, src, 1*8, 8, 15); // Insert byte 6 into position 5, leave the rest alone.
gromero@9496 3074 }
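// Equivalent C (illustrative):
//
//   uint32_t load_reverse_32(uint32_t src) {
//     return ((src & 0xff) << 24) | ((src & 0xff00) << 8) |
//            ((src >> 8) & 0xff00) | (src >> 24);
//   }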
gromero@9496 3075
gromero@9496 3076 // Calculate the column addresses of the crc32 lookup table into distinct registers.
gromero@9496 3077 // This loop-invariant calculation is moved out of the loop body, reducing the loop
gromero@9496 3078 // body size from 20 to 16 instructions.
gromero@9496 3079 // Returns the offset that was used to calculate the address of column tc3.
gromero@9496 3080 // Due to register shortage, setting tc3 may overwrite table. With the return offset
gromero@9496 3081 // at hand, the original table address can be easily reconstructed.
gromero@9496 3082 int MacroAssembler::crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3) {
gromero@9496 3083
gromero@9496 3084 #ifdef VM_LITTLE_ENDIAN
gromero@9496 3085 // This is what we implement (the DOLIT4 part):
gromero@9496 3086   // =========================================================================
gromero@9496 3087 // #define DOLIT4 c ^= *buf4++; \
gromero@9496 3088 // c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
gromero@9496 3089 // crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
gromero@9496 3090 // #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
gromero@9496 3091   // =========================================================================
gromero@9496 3092 const int ix0 = 3*(4*CRC32_COLUMN_SIZE);
gromero@9496 3093 const int ix1 = 2*(4*CRC32_COLUMN_SIZE);
gromero@9496 3094 const int ix2 = 1*(4*CRC32_COLUMN_SIZE);
gromero@9496 3095 const int ix3 = 0*(4*CRC32_COLUMN_SIZE);
gromero@9496 3096 #else
gromero@9496 3097 // This is what we implement (the DOBIG4 part):
gromero@9496 3098 // =========================================================================
gromero@9496 3099 // #define DOBIG4 c ^= *++buf4; \
gromero@9496 3100 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
gromero@9496 3101 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
gromero@9496 3102 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
gromero@9496 3103 // =========================================================================
gromero@9496 3104 const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
gromero@9496 3105 const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
gromero@9496 3106 const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
gromero@9496 3107 const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
gromero@9496 3108 #endif
gromero@9496 3109 assert_different_registers(table, tc0, tc1, tc2);
gromero@9496 3110 assert(table == tc3, "must be!");
gromero@9496 3111
gromero@9496 3112 if (ix0 != 0) addi(tc0, table, ix0);
gromero@9496 3113 if (ix1 != 0) addi(tc1, table, ix1);
gromero@9496 3114 if (ix2 != 0) addi(tc2, table, ix2);
gromero@9496 3115 if (ix3 != 0) addi(tc3, table, ix3);
gromero@9496 3116
gromero@9496 3117 return ix3;
gromero@9496 3118 }
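// Typical use (see kernel_crc32_2word/1word below): compute the column
// addresses once before the main loop, then undo the offset afterwards:
//
//   int off = crc32_table_columns(table, tc0, tc1, tc2, tc3);
//   // ... main loop indexes via tc0..tc3 ...
//   if (off != 0) addi(table, table, -off);  // reconstruct table for tailLoop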
gromero@9496 3119
gromero@9496 3120 /**
gromero@9496 3121 * uint32_t crc;
gromero@9496 3122 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
gromero@9496 3123 */
gromero@9496 3124 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
gromero@9496 3125 assert_different_registers(crc, table, tmp);
gromero@9496 3126 assert_different_registers(val, table);
gromero@9496 3127
gromero@9496 3128 if (crc == val) { // Must rotate first to use the unmodified value.
gromero@9496 3129 rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
gromero@9496 3130 // As we use a word (4-byte) instruction, we have to adapt the mask bit positions.
gromero@9496 3131 srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits.
gromero@9496 3132 } else {
gromero@9496 3133 srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits.
gromero@9496 3134 rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
gromero@9496 3135 }
gromero@9496 3136 lwzx(tmp, table, tmp);
gromero@9496 3137 xorr(crc, crc, tmp);
gromero@9496 3138 }
gromero@9496 3139
gromero@9496 3140 /**
gromero@9496 3141 * uint32_t crc;
gromero@9496 3142 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
gromero@9496 3143 */
gromero@9496 3144 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
gromero@9496 3145 fold_byte_crc32(crc, crc, table, tmp);
gromero@9496 3146 }
gromero@9496 3147
gromero@9496 3148 /**
gromero@9496 3149 * Emits code to update CRC-32 with a byte value according to constants in table.
gromero@9496 3150 *
gromero@9496 3151 * @param [in,out]crc Register containing the crc.
gromero@9496 3152 * @param [in]val Register containing the byte to fold into the CRC.
gromero@9496 3153 * @param [in]table Register containing the table of crc constants.
gromero@9496 3154 *
gromero@9496 3155 * uint32_t crc;
gromero@9496 3156 * val = crc_table[(val ^ crc) & 0xFF];
gromero@9496 3157 * crc = val ^ (crc >> 8);
gromero@9496 3158 */
gromero@9496 3159 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
gromero@9496 3160 BLOCK_COMMENT("update_byte_crc32:");
gromero@9496 3161 xorr(val, val, crc);
gromero@9496 3162 fold_byte_crc32(crc, val, table, val);
gromero@9496 3163 }
gromero@9496 3164
gromero@9496 3165 /**
gromero@9496 3166 * @param crc register containing existing CRC (32-bit)
gromero@9496 3167 * @param buf register pointing to input byte buffer (byte*)
gromero@9496 3168 * @param len register containing number of bytes
gromero@9496 3169 * @param table register pointing to CRC table
gromero@9496 3170 */
gromero@9496 3171 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
gromero@9496 3172 Register data, bool loopAlignment, bool invertCRC) {
gromero@9496 3173 assert_different_registers(crc, buf, len, table, data);
gromero@9496 3174
gromero@9496 3175 Label L_mainLoop, L_done;
gromero@9496 3176 const int mainLoop_stepping = 1;
gromero@9496 3177 const int mainLoop_alignment = loopAlignment ? 32 : 4; // (InputForNewCode > 4 ? InputForNewCode : 32) : 4;
gromero@9496 3178
gromero@9496 3179 // Process all bytes in a single-byte loop.
gromero@9496 3180 cmpdi(CCR0, len, 0); // Anything to do?
gromero@9496 3181 mtctr(len);
gromero@9496 3182 beq(CCR0, L_done);
gromero@9496 3183
gromero@9496 3184 if (invertCRC) {
gromero@9496 3185 nand(crc, crc, crc); // ~c
gromero@9496 3186 }
gromero@9496 3187
gromero@9496 3188 align(mainLoop_alignment);
gromero@9496 3189 BIND(L_mainLoop);
gromero@9496 3190 lbz(data, 0, buf); // Byte from buffer, zero-extended.
gromero@9496 3191 addi(buf, buf, mainLoop_stepping); // Advance buffer position.
gromero@9496 3192 update_byte_crc32(crc, data, table);
gromero@9496 3193 bdnz(L_mainLoop); // Iterate.
gromero@9496 3194
gromero@9496 3195 if (invertCRC) {
gromero@9496 3196 nand(crc, crc, crc); // ~c
gromero@9496 3197 }
gromero@9496 3198
gromero@9496 3199 bind(L_done);
gromero@9496 3200 }
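// C equivalent of the loop above (illustrative, zlib-style byte loop):
//
//   if (len > 0) {
//     if (invertCRC) crc = ~crc;
//     do { crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8); } while (--len);
//     if (invertCRC) crc = ~crc;
//   }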
gromero@9496 3201
gromero@9496 3202 /**
gromero@9496 3203 * Emits code to update CRC-32 with a 4-byte value according to constants in table
gromero@9496 3204 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c
gromero@9496 3205 */
gromero@9496 3206 // A note on the lookup table address(es):
gromero@9496 3207 // The lookup table consists of two sets of four columns each.
gromero@9496 3208 // The columns {0..3} are used for little-endian machines.
gromero@9496 3209 // The columns {4..7} are used for big-endian machines.
gromero@9496 3210 // To save the effort of adding the column offset to the table address each time
gromero@9496 3211 // a table element is looked up, it is possible to pass the pre-calculated
gromero@9496 3212 // column addresses.
gromero@9496 3213 // Uses R9..R12 as work registers. Must be saved/restored by caller, if necessary.
gromero@9496 3214 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
gromero@9496 3215 Register t0, Register t1, Register t2, Register t3,
gromero@9496 3216 Register tc0, Register tc1, Register tc2, Register tc3) {
gromero@9496 3217 assert_different_registers(crc, t3);
gromero@9496 3218
gromero@9496 3219 // XOR crc with next four bytes of buffer.
gromero@9496 3220 lwz(t3, bufDisp, buf);
gromero@9496 3221 if (bufInc != 0) {
gromero@9496 3222 addi(buf, buf, bufInc);
gromero@9496 3223 }
gromero@9496 3224 xorr(t3, t3, crc);
gromero@9496 3225
gromero@9496 3226 // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
gromero@9496 3227 rlwinm(t0, t3, 2, 24-2, 31-2); // ((t1 >> 0) & 0xff) << 2
gromero@9496 3228 rlwinm(t1, t3, 32+(2- 8), 24-2, 31-2); // ((t1 >> 8) & 0xff) << 2
gromero@9496 3229 rlwinm(t2, t3, 32+(2-16), 24-2, 31-2); // ((t1 >> 16) & 0xff) << 2
gromero@9496 3230 rlwinm(t3, t3, 32+(2-24), 24-2, 31-2); // ((t1 >> 24) & 0xff) << 2
gromero@9496 3231
gromero@9496 3232 // Use the pre-calculated column addresses.
gromero@9496 3233 // Load pre-calculated table values.
gromero@9496 3234 lwzx(t0, tc0, t0);
gromero@9496 3235 lwzx(t1, tc1, t1);
gromero@9496 3236 lwzx(t2, tc2, t2);
gromero@9496 3237 lwzx(t3, tc3, t3);
gromero@9496 3238
gromero@9496 3239 // Calculate new crc from table values.
gromero@9496 3240 xorr(t0, t0, t1);
gromero@9496 3241 xorr(t2, t2, t3);
gromero@9496 3242 xorr(crc, t0, t2); // Now crc contains the final checksum value.
gromero@9496 3243 }
gromero@9496 3244
gromero@9496 3245 /**
gromero@9496 3246 * @param crc register containing existing CRC (32-bit)
gromero@9496 3247 * @param buf register pointing to input byte buffer (byte*)
gromero@9496 3248 * @param len register containing number of bytes
gromero@9496 3249 * @param table register pointing to CRC table
gromero@9496 3250 *
gromero@9496 3251  * Uses R9..R12 as work registers. Must be saved/restored by caller!
gromero@9496 3252 */
gromero@9496 3253 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
gromero@9496 3254 Register t0, Register t1, Register t2, Register t3,
gromero@9496 3255 Register tc0, Register tc1, Register tc2, Register tc3) {
gromero@9496 3256 assert_different_registers(crc, buf, len, table);
gromero@9496 3257
gromero@9496 3258 Label L_mainLoop, L_tail;
gromero@9496 3259 Register tmp = t0;
gromero@9496 3260 Register data = t0;
gromero@9496 3261 Register tmp2 = t1;
gromero@9496 3262 const int mainLoop_stepping = 8;
gromero@9496 3263 const int tailLoop_stepping = 1;
gromero@9496 3264 const int log_stepping = exact_log2(mainLoop_stepping);
gromero@9496 3265 const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
gromero@9496 3266 const int complexThreshold = 2*mainLoop_stepping;
gromero@9496 3267
gromero@9496 3268 // Don't test for len <= 0 here. This pathological case should not occur anyway.
gromero@9496 3269 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
gromero@9496 3270 // The situation itself is detected and handled correctly by the conditional branches
gromero@9496 3271   // following the length adjustments (len -= stepping, len += stepping).
gromero@9496 3272 assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
gromero@9496 3273
gromero@9496 3274 BLOCK_COMMENT("kernel_crc32_2word {");
gromero@9496 3275
gromero@9496 3276 nand(crc, crc, crc); // ~c
gromero@9496 3277
gromero@9496 3278 // Check for short (<mainLoop_stepping) buffer.
gromero@9496 3279 cmpdi(CCR0, len, complexThreshold);
gromero@9496 3280 blt(CCR0, L_tail);
gromero@9496 3281
gromero@9496 3282 // Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
gromero@9496 3283 // We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
gromero@9496 3284 {
gromero@9496 3285 // Align buf addr to mainLoop_stepping boundary.
gromero@9496 3286 neg(tmp2, buf); // Calculate # preLoop iterations for alignment.
gromero@9496 3287 rldicl(tmp2, tmp2, 0, 64-log_stepping); // Rotate tmp2 0 bits, insert into tmp2, anding with mask with 1s from 62..63.
gromero@9496 3288
gromero@9496 3289 if (complexThreshold > mainLoop_stepping) {
gromero@9496 3290 sub(len, len, tmp2); // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
gromero@9496 3291 } else {
gromero@9496 3292 sub(tmp, len, tmp2); // Remaining bytes for main loop.
gromero@9496 3293 cmpdi(CCR0, tmp, mainLoop_stepping);
gromero@9496 3294 blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
gromero@9496 3295 mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
gromero@9496 3296 }
gromero@9496 3297 update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
gromero@9496 3298 }
gromero@9496 3299
gromero@9496 3300 srdi(tmp2, len, log_stepping); // #iterations for mainLoop
gromero@9496 3301 andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
gromero@9496 3302 mtctr(tmp2);
gromero@9496 3303
gromero@9496 3304 #ifdef VM_LITTLE_ENDIAN
gromero@9496 3305 Register crc_rv = crc;
gromero@9496 3306 #else
gromero@9496 3307 Register crc_rv = tmp; // Load_reverse needs separate registers to work on.
gromero@9496 3308 // Occupies tmp, but frees up crc.
gromero@9496 3309 load_reverse_32(crc_rv, crc); // Revert byte order because we are dealing with big-endian data.
gromero@9496 3310 tmp = crc;
gromero@9496 3311 #endif
gromero@9496 3312
gromero@9496 3313 int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
gromero@9496 3314
gromero@9496 3315 align(mainLoop_alignment); // Octoword-aligned loop address. Shows 2% improvement.
gromero@9496 3316 BIND(L_mainLoop);
gromero@9496 3317 update_1word_crc32(crc_rv, buf, table, 0, 0, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
gromero@9496 3318 update_1word_crc32(crc_rv, buf, table, 4, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
gromero@9496 3319 bdnz(L_mainLoop);
gromero@9496 3320
gromero@9496 3321 #ifndef VM_LITTLE_ENDIAN
gromero@9496 3322 load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
gromero@9496 3323   tmp = crc_rv;                                  // tmp uses its original register again.
gromero@9496 3324 #endif
gromero@9496 3325
gromero@9496 3326 // Restore original table address for tailLoop.
gromero@9496 3327 if (reconstructTableOffset != 0) {
gromero@9496 3328 addi(table, table, -reconstructTableOffset);
gromero@9496 3329 }
gromero@9496 3330
gromero@9496 3331 // Process last few (<complexThreshold) bytes of buffer.
gromero@9496 3332 BIND(L_tail);
gromero@9496 3333 update_byteLoop_crc32(crc, buf, len, table, data, false, false);
gromero@9496 3334
gromero@9496 3335 nand(crc, crc, crc); // ~c
gromero@9496 3336 BLOCK_COMMENT("} kernel_crc32_2word");
gromero@9496 3337 }
gromero@9496 3338
gromero@9496 3339 /**
gromero@9496 3340 * @param crc register containing existing CRC (32-bit)
gromero@9496 3341 * @param buf register pointing to input byte buffer (byte*)
gromero@9496 3342 * @param len register containing number of bytes
gromero@9496 3343 * @param table register pointing to CRC table
gromero@9496 3344 *
gromero@9496 3345  * Uses R9..R12 as work registers. Must be saved/restored by caller!
gromero@9496 3346 */
gromero@9496 3347 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
gromero@9496 3348 Register t0, Register t1, Register t2, Register t3,
gromero@9496 3349 Register tc0, Register tc1, Register tc2, Register tc3) {
gromero@9496 3350 assert_different_registers(crc, buf, len, table);
gromero@9496 3351
gromero@9496 3352 Label L_mainLoop, L_tail;
gromero@9496 3353 Register tmp = t0;
gromero@9496 3354 Register data = t0;
gromero@9496 3355 Register tmp2 = t1;
gromero@9496 3356 const int mainLoop_stepping = 4;
gromero@9496 3357 const int tailLoop_stepping = 1;
gromero@9496 3358 const int log_stepping = exact_log2(mainLoop_stepping);
gromero@9496 3359 const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
gromero@9496 3360 const int complexThreshold = 2*mainLoop_stepping;
gromero@9496 3361
gromero@9496 3362 // Don't test for len <= 0 here. This pathological case should not occur anyway.
gromero@9496 3363 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
gromero@9496 3364 // The situation itself is detected and handled correctly by the conditional branches
gromero@9496 3365   // following the length adjustments (len -= stepping, len += stepping).
gromero@9496 3366 assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
gromero@9496 3367
gromero@9496 3368 BLOCK_COMMENT("kernel_crc32_1word {");
gromero@9496 3369
gromero@9496 3370 nand(crc, crc, crc); // ~c
gromero@9496 3371
gromero@9496 3372 // Check for short (<mainLoop_stepping) buffer.
gromero@9496 3373 cmpdi(CCR0, len, complexThreshold);
gromero@9496 3374 blt(CCR0, L_tail);
gromero@9496 3375
gromero@9496 3376 // Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
gromero@9496 3377 // We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
gromero@9496 3378 {
gromero@9496 3379 // Align buf addr to mainLoop_stepping boundary.
gromero@9496 3380 neg(tmp2, buf); // Calculate # preLoop iterations for alignment.
gromero@9496 3381 rldicl(tmp2, tmp2, 0, 64-log_stepping); // Rotate tmp2 0 bits, insert into tmp2, anding with mask with 1s from 62..63.
gromero@9496 3382
gromero@9496 3383 if (complexThreshold > mainLoop_stepping) {
gromero@9496 3384 sub(len, len, tmp2); // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
gromero@9496 3385 } else {
gromero@9496 3386 sub(tmp, len, tmp2); // Remaining bytes for main loop.
gromero@9496 3387 cmpdi(CCR0, tmp, mainLoop_stepping);
gromero@9496 3388 blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
gromero@9496 3389 mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
gromero@9496 3390 }
gromero@9496 3391 update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
gromero@9496 3392 }
gromero@9496 3393
gromero@9496 3394 srdi(tmp2, len, log_stepping); // #iterations for mainLoop
gromero@9496 3395 andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
gromero@9496 3396 mtctr(tmp2);
gromero@9496 3397
gromero@9496 3398 #ifdef VM_LITTLE_ENDIAN
gromero@9496 3399 Register crc_rv = crc;
gromero@9496 3400 #else
gromero@9496 3401 Register crc_rv = tmp; // Load_reverse needs separate registers to work on.
gromero@9496 3402 // Occupies tmp, but frees up crc.
gromero@9496 3403   load_reverse_32(crc_rv, crc);          // Revert byte order because we are dealing with big-endian data.
gromero@9496 3404 tmp = crc;
gromero@9496 3405 #endif
gromero@9496 3406
gromero@9496 3407 int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
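  // crc32_table_columns distributes the four table columns into tc0..tc3 and
  // may advance the table pointer; the returned offset is undone again before
  // the tail loop below.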
gromero@9496 3408
gromero@9496 3409 align(mainLoop_alignment); // Octoword-aligned loop address. Shows 2% improvement.
gromero@9496 3410 BIND(L_mainLoop);
gromero@9496 3411 update_1word_crc32(crc_rv, buf, table, 0, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
gromero@9496 3412 bdnz(L_mainLoop);
gromero@9496 3413
gromero@9496 3414 #ifndef VM_LITTLE_ENDIAN
gromero@9496 3415 load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
gromero@9496 3416   tmp = crc_rv;                          // tmp uses its original register again.
gromero@9496 3417 #endif
gromero@9496 3418
gromero@9496 3419 // Restore original table address for tailLoop.
gromero@9496 3420 if (reconstructTableOffset != 0) {
gromero@9496 3421 addi(table, table, -reconstructTableOffset);
gromero@9496 3422 }
gromero@9496 3423
gromero@9496 3424 // Process last few (<complexThreshold) bytes of buffer.
gromero@9496 3425 BIND(L_tail);
gromero@9496 3426 update_byteLoop_crc32(crc, buf, len, table, data, false, false);
gromero@9496 3427
gromero@9496 3428 nand(crc, crc, crc); // ~c
gromero@9496 3429 BLOCK_COMMENT("} kernel_crc32_1word");
gromero@9496 3430 }
gromero@9496 3431
gromero@9496 3432 /**
gromero@9496 3433 * @param crc register containing existing CRC (32-bit)
gromero@9496 3434 * @param buf register pointing to input byte buffer (byte*)
gromero@9496 3435 * @param len register containing number of bytes
gromero@9496 3436 * @param table register pointing to CRC table
gromero@9496 3437 *
gromero@9496 3438 * Uses R7_ARG5, R8_ARG6 as work registers.
gromero@9496 3439 */
gromero@9496 3440 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
gromero@9496 3441 Register t0, Register t1, Register t2, Register t3) {
gromero@9496 3442 assert_different_registers(crc, buf, len, table);
gromero@9496 3443
gromero@9496 3444 Register data = t0; // Holds the current byte to be folded into crc.
gromero@9496 3445
gromero@9496 3446 BLOCK_COMMENT("kernel_crc32_1byte {");
gromero@9496 3447
gromero@9496 3448 // Process all bytes in a single-byte loop.
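  // Conceptually, each iteration performs the classic reflected, table-driven
  // update (what update_byte_crc32 emits):
  //   crc = (crc >> 8) ^ table[(crc ^ *buf++) & 0xff]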
gromero@9496 3449 update_byteLoop_crc32(crc, buf, len, table, data, true, true);
gromero@9496 3450
gromero@9496 3451 BLOCK_COMMENT("} kernel_crc32_1byte");
gromero@9496 3452 }
gromero@9496 3453
mdoerr@9497 3454 /**
mdoerr@9497 3455 * @param crc register containing existing CRC (32-bit)
mdoerr@9497 3456 * @param buf register pointing to input byte buffer (byte*)
mdoerr@9497 3457 * @param len register containing number of bytes
mdoerr@9497 3458 * @param table register pointing to CRC table
mdoerr@9497 3459 * @param constants register pointing to CRC table for 128-bit aligned memory
mdoerr@9497 3460 * @param barretConstants register pointing to table for barrett reduction
mdoerr@9497 3461 * @param t0 volatile register
mdoerr@9497 3462 * @param t1 volatile register
mdoerr@9497 3463 * @param t2 volatile register
mdoerr@9497 3464  * @param t3 volatile register
 * @param t4 volatile register
mdoerr@9497 3465 */
mdoerr@9497 3466 void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
mdoerr@9497 3467 Register constants, Register barretConstants,
mdoerr@9497 3468 Register t0, Register t1, Register t2, Register t3, Register t4) {
mdoerr@9497 3469 assert_different_registers(crc, buf, len, table);
mdoerr@9497 3470
mdoerr@9497 3471   Label L_alignedHead, L_tail, L_start, L_end;
mdoerr@9497 3472
mdoerr@9497 3473 Register prealign = t0;
mdoerr@9497 3474 Register postalign = t0;
mdoerr@9497 3475
mdoerr@9497 3476   BLOCK_COMMENT("kernel_crc32_1word_vpmsumd {");
mdoerr@9497 3477
mdoerr@9497 3478   // 1. Use kernel_crc32_1word for inputs shorter than 384 bytes.
mdoerr@9497 3479 clrldi(len, len, 32);
mdoerr@9497 3480 cmpdi(CCR0, len, 384);
mdoerr@9497 3481 bge(CCR0, L_start);
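  // The 384-byte threshold covers the worst case: up to 127 bytes to reach
  // 128-byte alignment still leave >= 256 bytes (two full 128-byte blocks)
  // for the aligned kernel.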
mdoerr@9497 3482
mdoerr@9497 3483 Register tc0 = t4;
mdoerr@9497 3484 Register tc1 = constants;
mdoerr@9497 3485 Register tc2 = barretConstants;
mdoerr@9497 3486   kernel_crc32_1word(crc, buf, len, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
mdoerr@9497 3487 b(L_end);
mdoerr@9497 3488
mdoerr@9497 3489 BIND(L_start);
mdoerr@9497 3490
mdoerr@9497 3491 // 2. ~c
mdoerr@9497 3492 nand(crc, crc, crc);
mdoerr@9497 3493
mdoerr@9497 3494   // 3. Calculate from the start of the buffer to the first 128-byte-aligned address.
mdoerr@9497 3495 clrldi_(prealign, buf, 57);
mdoerr@9497 3496 beq(CCR0, L_alignedHead);
mdoerr@9497 3497
mdoerr@9497 3498 subfic(prealign, prealign, 128);
mdoerr@9497 3499
mdoerr@9497 3500 subf(len, prealign, len);
mdoerr@9497 3501 update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false);
mdoerr@9497 3502
mdoerr@9497 3503   // 4. Calculate from the first to the last 128-byte-aligned address.
mdoerr@9497 3504 BIND(L_alignedHead);
mdoerr@9497 3505
mdoerr@9497 3506 clrldi(postalign, len, 57);
mdoerr@9497 3507 subf(len, postalign, len);
mdoerr@9497 3508
mdoerr@9497 3509   // len is at least 256 bytes here (guaranteed by the 384-byte check above).
mdoerr@9497 3510 kernel_crc32_1word_aligned(crc, buf, len, constants, barretConstants, t1, t2, t3);
mdoerr@9497 3511
mdoerr@9497 3512   // 5. Calculate the remaining bytes.
mdoerr@9497 3513 cmpdi(CCR0, postalign, 0);
mdoerr@9497 3514 beq(CCR0, L_tail);
mdoerr@9497 3515
mdoerr@9497 3516 update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false);
mdoerr@9497 3517
mdoerr@9497 3518 BIND(L_tail);
mdoerr@9497 3519
mdoerr@9497 3520 // 6. ~c
mdoerr@9497 3521 nand(crc, crc, crc);
mdoerr@9497 3522
mdoerr@9497 3523 BIND(L_end);
mdoerr@9497 3524
mdoerr@9497 3525   BLOCK_COMMENT("} kernel_crc32_1word_vpmsumd");
mdoerr@9497 3526 }
mdoerr@9497 3527
mdoerr@9497 3528 /**
mdoerr@9497 3529 * @param crc register containing existing CRC (32-bit)
mdoerr@9497 3530 * @param buf register pointing to input byte buffer (byte*)
mdoerr@9497 3531 * @param len register containing number of bytes
mdoerr@9497 3532 * @param constants register pointing to CRC table for 128-bit aligned memory
mdoerr@9497 3533 * @param barretConstants register pointing to table for barrett reduction
mdoerr@9497 3534 * @param t0 volatile register
mdoerr@9497 3535 * @param t1 volatile register
mdoerr@9497 3536 * @param t2 volatile register
mdoerr@9497 3537 */
mdoerr@9497 3538 void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
mdoerr@9497 3539 Register constants, Register barretConstants, Register t0, Register t1, Register t2) {
mdoerr@9497 3540   Label L_barrett_reduction, L_end, L_first_warm_up_done, L_first_cool_down, L_second_cool_down, L_XOR;
mdoerr@9497 3542   Label L_1, L_2, L_3, L_4;
mdoerr@9497 3543
mdoerr@9497 3544 Register rLoaded = t0;
mdoerr@9497 3545 Register rTmp1 = t1;
mdoerr@9497 3546 Register rTmp2 = t2;
mdoerr@9497 3547 Register off16 = R22;
mdoerr@9497 3548 Register off32 = R23;
mdoerr@9497 3549 Register off48 = R24;
mdoerr@9497 3550 Register off64 = R25;
mdoerr@9497 3551 Register off80 = R26;
mdoerr@9497 3552 Register off96 = R27;
mdoerr@9497 3553 Register off112 = R28;
mdoerr@9497 3554 Register rIdx = R29;
mdoerr@9497 3555 Register rMax = R30;
mdoerr@9497 3556 Register constantsPos = R31;
mdoerr@9497 3557
mdoerr@9497 3558 VectorRegister mask_32bit = VR24;
mdoerr@9497 3559 VectorRegister mask_64bit = VR25;
mdoerr@9497 3560 VectorRegister zeroes = VR26;
mdoerr@9497 3561 VectorRegister const1 = VR27;
mdoerr@9497 3562 VectorRegister const2 = VR28;
mdoerr@9497 3563
mdoerr@9497 3564 // Save non-volatile vector registers (frameless).
mdoerr@9497 3565 Register offset = t1; int offsetInt = 0;
mdoerr@9497 3566 offsetInt -= 16; li(offset, -16); stvx(VR20, offset, R1_SP);
mdoerr@9497 3567 offsetInt -= 16; addi(offset, offset, -16); stvx(VR21, offset, R1_SP);
mdoerr@9497 3568 offsetInt -= 16; addi(offset, offset, -16); stvx(VR22, offset, R1_SP);
mdoerr@9497 3569 offsetInt -= 16; addi(offset, offset, -16); stvx(VR23, offset, R1_SP);
mdoerr@9497 3570 offsetInt -= 16; addi(offset, offset, -16); stvx(VR24, offset, R1_SP);
mdoerr@9497 3571 offsetInt -= 16; addi(offset, offset, -16); stvx(VR25, offset, R1_SP);
mdoerr@9497 3572 offsetInt -= 16; addi(offset, offset, -16); stvx(VR26, offset, R1_SP);
mdoerr@9497 3573 offsetInt -= 16; addi(offset, offset, -16); stvx(VR27, offset, R1_SP);
mdoerr@9497 3574 offsetInt -= 16; addi(offset, offset, -16); stvx(VR28, offset, R1_SP);
mdoerr@9497 3575 offsetInt -= 8; std(R22, offsetInt, R1_SP);
mdoerr@9497 3576 offsetInt -= 8; std(R23, offsetInt, R1_SP);
mdoerr@9497 3577 offsetInt -= 8; std(R24, offsetInt, R1_SP);
mdoerr@9497 3578 offsetInt -= 8; std(R25, offsetInt, R1_SP);
mdoerr@9497 3579 offsetInt -= 8; std(R26, offsetInt, R1_SP);
mdoerr@9497 3580 offsetInt -= 8; std(R27, offsetInt, R1_SP);
mdoerr@9497 3581 offsetInt -= 8; std(R28, offsetInt, R1_SP);
mdoerr@9497 3582 offsetInt -= 8; std(R29, offsetInt, R1_SP);
mdoerr@9497 3583 offsetInt -= 8; std(R30, offsetInt, R1_SP);
mdoerr@9497 3584 offsetInt -= 8; std(R31, offsetInt, R1_SP);
mdoerr@9497 3585
mdoerr@9497 3586 // Set constants
mdoerr@9497 3587 li(off16, 16);
mdoerr@9497 3588 li(off32, 32);
mdoerr@9497 3589 li(off48, 48);
mdoerr@9497 3590 li(off64, 64);
mdoerr@9497 3591 li(off80, 80);
mdoerr@9497 3592 li(off96, 96);
mdoerr@9497 3593 li(off112, 112);
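  // Offsets for the eight 16-byte vector loads covering one 128-byte block.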
mdoerr@9497 3594
mdoerr@9497 3595 clrldi(crc, crc, 32);
mdoerr@9497 3596
mdoerr@9497 3597 vxor(zeroes, zeroes, zeroes);
mdoerr@9497 3598 vspltisw(VR0, -1);
mdoerr@9497 3599
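  // Build masks selecting the low 32 resp. 64 bits of a vector register:
  // shifting (zeroes || all-ones) left by 4 resp. 8 bytes leaves ones only
  // in the least significant bytes.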
mdoerr@9497 3600 vsldoi(mask_32bit, zeroes, VR0, 4);
mdoerr@9603 3601 vsldoi(mask_64bit, zeroes, VR0, 8);
mdoerr@9497 3602
mdoerr@9497 3603   // Get the initial value into VR8.
mdoerr@9497 3604 vxor(VR8, VR8, VR8);
mdoerr@9497 3605 mtvrd(VR8, crc);
mdoerr@9603 3606 vsldoi(VR8, zeroes, VR8, 8); // shift into bottom 32 bits
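  // VR8 now holds the initial crc in its low doubleword; it is xor-ed into
  // the first 16 data bytes below ("xor in initial value").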
mdoerr@9497 3607
mdoerr@9497 3608 li (rLoaded, 0);
mdoerr@9497 3609
mdoerr@9497 3610 rldicr(rIdx, len, 0, 56);
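  // rIdx = len & ~127: number of bytes processed in full 128-byte blocks.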
mdoerr@9497 3611
mdoerr@9497 3612 {
mdoerr@9497 3613 BIND(L_1);
mdoerr@9497 3614 // Checksum in blocks of MAX_SIZE (32768)
mdoerr@9497 3615 lis(rMax, 0);
mdoerr@9497 3616 ori(rMax, rMax, 32768);
mdoerr@9497 3617 mr(rTmp2, rMax);
mdoerr@9497 3618 cmpd(CCR0, rIdx, rMax);
mdoerr@9497 3619 bgt(CCR0, L_2);
mdoerr@9497 3620 mr(rMax, rIdx);
mdoerr@9497 3621
mdoerr@9497 3622 BIND(L_2);
mdoerr@9497 3623 subf(rIdx, rMax, rIdx);
mdoerr@9497 3624
mdoerr@9497 3625 // our main loop does 128 bytes at a time
mdoerr@9497 3626 srdi(rMax, rMax, 7);
mdoerr@9497 3627
mdoerr@9497 3628 /*
mdoerr@9497 3629 * Work out the offset into the constants table to start at. Each
mdoerr@9497 3630 * constant is 16 bytes, and it is used against 128 bytes of input
mdoerr@9497 3631 * data - 128 / 16 = 8
mdoerr@9497 3632 */
mdoerr@9497 3633 sldi(rTmp1, rMax, 4);
mdoerr@9497 3634 srdi(rTmp2, rTmp2, 3);
mdoerr@9497 3635 subf(rTmp1, rTmp1, rTmp2);
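    // The table holds one 16-byte constant per 128-byte chunk of MAX_SIZE
    // (32768/128 * 16 = 4096 bytes). Starting at 4096 - 16*chunks makes
    // every block size end at the table's last entry.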
mdoerr@9497 3636
mdoerr@9497 3637 // We reduce our final 128 bytes in a separate step
mdoerr@9497 3638 addi(rMax, rMax, -1);
mdoerr@9497 3639 mtctr(rMax);
mdoerr@9497 3640
mdoerr@9497 3641 // Find the start of our constants
mdoerr@9497 3642 add(constantsPos, constants, rTmp1);
mdoerr@9497 3643
mdoerr@9497 3644     // Zero VR0..VR7, which will contain our checksums.
mdoerr@9497 3645 vxor(VR0, VR0, VR0);
mdoerr@9497 3646 vxor(VR1, VR1, VR1);
mdoerr@9497 3647 vxor(VR2, VR2, VR2);
mdoerr@9497 3648 vxor(VR3, VR3, VR3);
mdoerr@9497 3649 vxor(VR4, VR4, VR4);
mdoerr@9497 3650 vxor(VR5, VR5, VR5);
mdoerr@9497 3651 vxor(VR6, VR6, VR6);
mdoerr@9497 3652 vxor(VR7, VR7, VR7);
mdoerr@9497 3653
mdoerr@9497 3654 lvx(const1, constantsPos);
mdoerr@9497 3655
mdoerr@9497 3656 /*
mdoerr@9497 3657 * If we are looping back to consume more data we use the values
mdoerr@9497 3658      * already in VR16..VR23.
mdoerr@9497 3659 */
mdoerr@9497 3660 cmpdi(CCR0, rLoaded, 1);
mdoerr@9497 3661 beq(CCR0, L_3);
mdoerr@9497 3662 {
mdoerr@9497 3663
mdoerr@9497 3664 // First warm up pass
mdoerr@9497 3665 lvx(VR16, buf);
mdoerr@9497 3666 lvx(VR17, off16, buf);
mdoerr@9497 3667 lvx(VR18, off32, buf);
mdoerr@9497 3668 lvx(VR19, off48, buf);
mdoerr@9497 3669 lvx(VR20, off64, buf);
mdoerr@9497 3670 lvx(VR21, off80, buf);
mdoerr@9497 3671 lvx(VR22, off96, buf);
mdoerr@9497 3672 lvx(VR23, off112, buf);
mdoerr@9497 3673 addi(buf, buf, 8*16);
mdoerr@9497 3674
mdoerr@9497 3675 // xor in initial value
mdoerr@9497 3676 vxor(VR16, VR16, VR8);
mdoerr@9497 3677 }
mdoerr@9497 3678
mdoerr@9497 3679 BIND(L_3);
mdoerr@9497 3680 bdz(L_first_warm_up_done);
mdoerr@9497 3681
mdoerr@9497 3682 addi(constantsPos, constantsPos, 16);
mdoerr@9497 3683 lvx(const2, constantsPos);
mdoerr@9497 3684
mdoerr@9497 3685 // Second warm up pass
mdoerr@9497 3686 vpmsumd(VR8, VR16, const1);
mdoerr@9497 3687 lvx(VR16, buf);
mdoerr@9497 3688
mdoerr@9497 3689 vpmsumd(VR9, VR17, const1);
mdoerr@9497 3690 lvx(VR17, off16, buf);
mdoerr@9497 3691
mdoerr@9497 3692 vpmsumd(VR10, VR18, const1);
mdoerr@9497 3693 lvx(VR18, off32, buf);
mdoerr@9497 3694
mdoerr@9497 3695 vpmsumd(VR11, VR19, const1);
mdoerr@9497 3696 lvx(VR19, off48, buf);
mdoerr@9497 3697
mdoerr@9497 3698 vpmsumd(VR12, VR20, const1);
mdoerr@9497 3699 lvx(VR20, off64, buf);
mdoerr@9497 3700
mdoerr@9497 3701 vpmsumd(VR13, VR21, const1);
mdoerr@9497 3702 lvx(VR21, off80, buf);
mdoerr@9497 3703
mdoerr@9497 3704 vpmsumd(VR14, VR22, const1);
mdoerr@9497 3705 lvx(VR22, off96, buf);
mdoerr@9497 3706
mdoerr@9497 3707 vpmsumd(VR15, VR23, const1);
mdoerr@9497 3708 lvx(VR23, off112, buf);
mdoerr@9497 3709
mdoerr@9497 3710 addi(buf, buf, 8 * 16);
mdoerr@9497 3711
mdoerr@9497 3712 bdz(L_first_cool_down);
mdoerr@9497 3713
mdoerr@9497 3714 /*
mdoerr@9497 3715 * main loop. We modulo schedule it such that it takes three iterations
mdoerr@9497 3716 * to complete - first iteration load, second iteration vpmsum, third
mdoerr@9497 3717 * iteration xor.
mdoerr@9497 3718 */
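    // Loads of the next constants (const1/const2) are interleaved with the
    // vpmsumd/vxor work below so the load latency is hidden.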
mdoerr@9497 3719 {
mdoerr@9497 3720 BIND(L_4);
mdoerr@9497 3721 lvx(const1, constantsPos); addi(constantsPos, constantsPos, 16);
mdoerr@9497 3722
mdoerr@9497 3723 vxor(VR0, VR0, VR8);
mdoerr@9497 3724 vpmsumd(VR8, VR16, const2);
mdoerr@9497 3725 lvx(VR16, buf);
mdoerr@9497 3726
mdoerr@9497 3727 vxor(VR1, VR1, VR9);
mdoerr@9497 3728 vpmsumd(VR9, VR17, const2);
mdoerr@9497 3729 lvx(VR17, off16, buf);
mdoerr@9497 3730
mdoerr@9497 3731 vxor(VR2, VR2, VR10);
mdoerr@9497 3732 vpmsumd(VR10, VR18, const2);
mdoerr@9497 3733 lvx(VR18, off32, buf);
mdoerr@9497 3734
mdoerr@9497 3735 vxor(VR3, VR3, VR11);
mdoerr@9497 3736 vpmsumd(VR11, VR19, const2);
mdoerr@9497 3737 lvx(VR19, off48, buf);
mdoerr@9497 3738 lvx(const2, constantsPos);
mdoerr@9497 3739
mdoerr@9497 3740 vxor(VR4, VR4, VR12);
mdoerr@9497 3741 vpmsumd(VR12, VR20, const1);
mdoerr@9497 3742 lvx(VR20, off64, buf);
mdoerr@9497 3743
mdoerr@9497 3744 vxor(VR5, VR5, VR13);
mdoerr@9497 3745 vpmsumd(VR13, VR21, const1);
mdoerr@9497 3746 lvx(VR21, off80, buf);
mdoerr@9497 3747
mdoerr@9497 3748 vxor(VR6, VR6, VR14);
mdoerr@9497 3749 vpmsumd(VR14, VR22, const1);
mdoerr@9497 3750 lvx(VR22, off96, buf);
mdoerr@9497 3751
mdoerr@9497 3752 vxor(VR7, VR7, VR15);
mdoerr@9497 3753 vpmsumd(VR15, VR23, const1);
mdoerr@9497 3754 lvx(VR23, off112, buf);
mdoerr@9497 3755
mdoerr@9497 3756 addi(buf, buf, 8 * 16);
mdoerr@9497 3757
mdoerr@9497 3758 bdnz(L_4);
mdoerr@9497 3759 }
mdoerr@9497 3760
mdoerr@9497 3761 BIND(L_first_cool_down);
mdoerr@9497 3762
mdoerr@9497 3763 // First cool down pass
mdoerr@9497 3764 lvx(const1, constantsPos);
mdoerr@9497 3765 addi(constantsPos, constantsPos, 16);
mdoerr@9497 3766
mdoerr@9497 3767 vxor(VR0, VR0, VR8);
mdoerr@9497 3768 vpmsumd(VR8, VR16, const1);
mdoerr@9497 3769
mdoerr@9497 3770 vxor(VR1, VR1, VR9);
mdoerr@9497 3771 vpmsumd(VR9, VR17, const1);
mdoerr@9497 3772
mdoerr@9497 3773 vxor(VR2, VR2, VR10);
mdoerr@9497 3774 vpmsumd(VR10, VR18, const1);
mdoerr@9497 3775
mdoerr@9497 3776 vxor(VR3, VR3, VR11);
mdoerr@9497 3777 vpmsumd(VR11, VR19, const1);
mdoerr@9497 3778
mdoerr@9497 3779 vxor(VR4, VR4, VR12);
mdoerr@9497 3780 vpmsumd(VR12, VR20, const1);
mdoerr@9497 3781
mdoerr@9497 3782 vxor(VR5, VR5, VR13);
mdoerr@9497 3783 vpmsumd(VR13, VR21, const1);
mdoerr@9497 3784
mdoerr@9497 3785 vxor(VR6, VR6, VR14);
mdoerr@9497 3786 vpmsumd(VR14, VR22, const1);
mdoerr@9497 3787
mdoerr@9497 3788 vxor(VR7, VR7, VR15);
mdoerr@9497 3789 vpmsumd(VR15, VR23, const1);
mdoerr@9497 3790
mdoerr@9497 3791 BIND(L_second_cool_down);
mdoerr@9497 3792 // Second cool down pass
mdoerr@9497 3793 vxor(VR0, VR0, VR8);
mdoerr@9497 3794 vxor(VR1, VR1, VR9);
mdoerr@9497 3795 vxor(VR2, VR2, VR10);
mdoerr@9497 3796 vxor(VR3, VR3, VR11);
mdoerr@9497 3797 vxor(VR4, VR4, VR12);
mdoerr@9497 3798 vxor(VR5, VR5, VR13);
mdoerr@9497 3799 vxor(VR6, VR6, VR14);
mdoerr@9497 3800 vxor(VR7, VR7, VR15);
mdoerr@9497 3801
mdoerr@9497 3802 /*
mdoerr@9497 3803 * vpmsumd produces a 96 bit result in the least significant bits
mdoerr@9497 3804 * of the register. Since we are bit reflected we have to shift it
mdoerr@9497 3805 * left 32 bits so it occupies the least significant bits in the
mdoerr@9497 3806 * bit reflected domain.
mdoerr@9497 3807 */
mdoerr@9497 3808 vsldoi(VR0, VR0, zeroes, 4);
mdoerr@9497 3809 vsldoi(VR1, VR1, zeroes, 4);
mdoerr@9497 3810 vsldoi(VR2, VR2, zeroes, 4);
mdoerr@9497 3811 vsldoi(VR3, VR3, zeroes, 4);
mdoerr@9497 3812 vsldoi(VR4, VR4, zeroes, 4);
mdoerr@9497 3813 vsldoi(VR5, VR5, zeroes, 4);
mdoerr@9497 3814 vsldoi(VR6, VR6, zeroes, 4);
mdoerr@9497 3815 vsldoi(VR7, VR7, zeroes, 4);
mdoerr@9497 3816
mdoerr@9497 3817 // xor with last 1024 bits
mdoerr@9497 3818 lvx(VR8, buf);
mdoerr@9497 3819 lvx(VR9, off16, buf);
mdoerr@9497 3820 lvx(VR10, off32, buf);
mdoerr@9497 3821 lvx(VR11, off48, buf);
mdoerr@9497 3822 lvx(VR12, off64, buf);
mdoerr@9497 3823 lvx(VR13, off80, buf);
mdoerr@9497 3824 lvx(VR14, off96, buf);
mdoerr@9497 3825 lvx(VR15, off112, buf);
mdoerr@9497 3826 addi(buf, buf, 8 * 16);
mdoerr@9497 3827
mdoerr@9497 3828 vxor(VR16, VR0, VR8);
mdoerr@9497 3829 vxor(VR17, VR1, VR9);
mdoerr@9497 3830 vxor(VR18, VR2, VR10);
mdoerr@9497 3831 vxor(VR19, VR3, VR11);
mdoerr@9497 3832 vxor(VR20, VR4, VR12);
mdoerr@9497 3833 vxor(VR21, VR5, VR13);
mdoerr@9497 3834 vxor(VR22, VR6, VR14);
mdoerr@9497 3835 vxor(VR23, VR7, VR15);
mdoerr@9497 3836
mdoerr@9497 3837 li(rLoaded, 1);
mdoerr@9497 3838 cmpdi(CCR0, rIdx, 0);
mdoerr@9497 3839 addi(rIdx, rIdx, 128);
mdoerr@9497 3840 bne(CCR0, L_1);
mdoerr@9497 3841 }
mdoerr@9497 3842
mdoerr@9497 3843 // Work out how many bytes we have left
mdoerr@9497 3844 andi_(len, len, 127);
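  // When called from kernel_crc32_1word_vpmsumd above, len is a multiple of
  // 128 (the caller strips the remainder), so the 16-byte chunk loop below
  // is expected to process zero chunks.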
mdoerr@9497 3845
mdoerr@9497 3846 // Calculate where in the constant table we need to start
mdoerr@9497 3847 subfic(rTmp1, len, 128);
mdoerr@9497 3848 add(constantsPos, constantsPos, rTmp1);
mdoerr@9497 3849
mdoerr@9497 3850   // How many 16-byte chunks are in the tail.
mdoerr@9497 3851 srdi(rIdx, len, 4);
mdoerr@9497 3852 mtctr(rIdx);
mdoerr@9497 3853
mdoerr@9497 3854 /*
mdoerr@9497 3855 * Reduce the previously calculated 1024 bits to 64 bits, shifting
mdoerr@9497 3856 * 32 bits to include the trailing 32 bits of zeros
mdoerr@9497 3857 */
mdoerr@9497 3858 lvx(VR0, constantsPos);
mdoerr@9497 3859 lvx(VR1, off16, constantsPos);
mdoerr@9497 3860 lvx(VR2, off32, constantsPos);
mdoerr@9497 3861 lvx(VR3, off48, constantsPos);
mdoerr@9497 3862 lvx(VR4, off64, constantsPos);
mdoerr@9497 3863 lvx(VR5, off80, constantsPos);
mdoerr@9497 3864 lvx(VR6, off96, constantsPos);
mdoerr@9497 3865 lvx(VR7, off112, constantsPos);
mdoerr@9497 3866 addi(constantsPos, constantsPos, 8 * 16);
mdoerr@9497 3867
mdoerr@9497 3868 vpmsumw(VR0, VR16, VR0);
mdoerr@9497 3869 vpmsumw(VR1, VR17, VR1);
mdoerr@9497 3870 vpmsumw(VR2, VR18, VR2);
mdoerr@9497 3871 vpmsumw(VR3, VR19, VR3);
mdoerr@9497 3872 vpmsumw(VR4, VR20, VR4);
mdoerr@9497 3873 vpmsumw(VR5, VR21, VR5);
mdoerr@9497 3874 vpmsumw(VR6, VR22, VR6);
mdoerr@9497 3875 vpmsumw(VR7, VR23, VR7);
mdoerr@9497 3876
mdoerr@9497 3877 // Now reduce the tail (0 - 112 bytes)
mdoerr@9497 3878 cmpdi(CCR0, rIdx, 0);
mdoerr@9497 3879 beq(CCR0, L_XOR);
mdoerr@9497 3880
mdoerr@9497 3881 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3882 lvx(VR17, constantsPos);
mdoerr@9497 3883 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3884 vxor(VR0, VR0, VR16);
mdoerr@9497 3885 beq(CCR0, L_XOR);
mdoerr@9497 3886
mdoerr@9497 3887 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3888 lvx(VR17, off16, constantsPos);
mdoerr@9497 3889 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3890 vxor(VR0, VR0, VR16);
mdoerr@9497 3891 beq(CCR0, L_XOR);
mdoerr@9497 3892
mdoerr@9497 3893 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3894 lvx(VR17, off32, constantsPos);
mdoerr@9497 3895 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3896 vxor(VR0, VR0, VR16);
mdoerr@9497 3897 beq(CCR0, L_XOR);
mdoerr@9497 3898
mdoerr@9497 3899 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3900   lvx(VR17, off48, constantsPos);
mdoerr@9497 3901 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3902 vxor(VR0, VR0, VR16);
mdoerr@9497 3903 beq(CCR0, L_XOR);
mdoerr@9497 3904
mdoerr@9497 3905 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3906 lvx(VR17, off64, constantsPos);
mdoerr@9497 3907 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3908 vxor(VR0, VR0, VR16);
mdoerr@9497 3909 beq(CCR0, L_XOR);
mdoerr@9497 3910
mdoerr@9497 3911 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3912 lvx(VR17, off80, constantsPos);
mdoerr@9497 3913 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3914 vxor(VR0, VR0, VR16);
mdoerr@9497 3915 beq(CCR0, L_XOR);
mdoerr@9497 3916
mdoerr@9497 3917 lvx(VR16, buf); addi(buf, buf, 16);
mdoerr@9497 3918 lvx(VR17, off96, constantsPos);
mdoerr@9497 3919 vpmsumw(VR16, VR16, VR17);
mdoerr@9497 3920 vxor(VR0, VR0, VR16);
mdoerr@9497 3921
mdoerr@9497 3922 // Now xor all the parallel chunks together
mdoerr@9497 3923 BIND(L_XOR);
mdoerr@9497 3924 vxor(VR0, VR0, VR1);
mdoerr@9497 3925 vxor(VR2, VR2, VR3);
mdoerr@9497 3926 vxor(VR4, VR4, VR5);
mdoerr@9497 3927 vxor(VR6, VR6, VR7);
mdoerr@9497 3928
mdoerr@9497 3929 vxor(VR0, VR0, VR2);
mdoerr@9497 3930 vxor(VR4, VR4, VR6);
mdoerr@9497 3931
mdoerr@9497 3932 vxor(VR0, VR0, VR4);
mdoerr@9497 3933
mdoerr@9497 3934 b(L_barrett_reduction);
mdoerr@9497 3935
mdoerr@9497 3936 BIND(L_first_warm_up_done);
mdoerr@9497 3937 lvx(const1, constantsPos);
mdoerr@9497 3938 addi(constantsPos, constantsPos, 16);
mdoerr@9497 3939 vpmsumd(VR8, VR16, const1);
mdoerr@9497 3940 vpmsumd(VR9, VR17, const1);
mdoerr@9497 3941 vpmsumd(VR10, VR18, const1);
mdoerr@9497 3942 vpmsumd(VR11, VR19, const1);
mdoerr@9497 3943 vpmsumd(VR12, VR20, const1);
mdoerr@9497 3944 vpmsumd(VR13, VR21, const1);
mdoerr@9497 3945 vpmsumd(VR14, VR22, const1);
mdoerr@9497 3946 vpmsumd(VR15, VR23, const1);
mdoerr@9497 3947 b(L_second_cool_down);
mdoerr@9497 3948
mdoerr@9497 3949 BIND(L_barrett_reduction);
mdoerr@9497 3950
mdoerr@9497 3951 lvx(const1, barretConstants);
mdoerr@9497 3952 addi(barretConstants, barretConstants, 16);
mdoerr@9497 3953 lvx(const2, barretConstants);
mdoerr@9497 3954
mdoerr@9603 3955 vsldoi(VR1, VR0, VR0, 8);
mdoerr@9497 3956 vxor(VR0, VR0, VR1); // xor two 64 bit results together
mdoerr@9497 3957
mdoerr@9497 3958 // shift left one bit
mdoerr@9497 3959 vspltisb(VR1, 1);
mdoerr@9497 3960 vsl(VR0, VR0, VR1);
mdoerr@9497 3961
mdoerr@9497 3962 vand(VR0, VR0, mask_64bit);
mdoerr@9497 3963
mdoerr@9497 3964 /*
mdoerr@9497 3965 * The reflected version of Barrett reduction. Instead of bit
mdoerr@9497 3966 * reflecting our data (which is expensive to do), we bit reflect our
mdoerr@9497 3967 * constants and our algorithm, which means the intermediate data in
mdoerr@9497 3968 * our vector registers goes from 0-63 instead of 63-0. We can reflect
mdoerr@9497 3969 * the algorithm because we don't carry in mod 2 arithmetic.
mdoerr@9497 3970 */
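  // Sketch of the algebra (assuming const1 holds the Barrett constant m for
  // the CRC polynomial n, const2 holds n):
  //   q = low32(low32(a) * m)   // the vand/vpmsumd/vand steps below
  //   a = a ^ (q * n)           // subtract the multiple of n (xor in GF(2))
  // leaving a mod n in the upper (bit-reflected) half of VR0.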
mdoerr@9497 3971 vand(VR1, VR0, mask_32bit); // bottom 32 bits of a
mdoerr@9497 3972 vpmsumd(VR1, VR1, const1); // ma
mdoerr@9497 3973   vand(VR1, VR1, mask_32bit);  // bottom 32 bits of ma
mdoerr@9497 3974   vpmsumd(VR1, VR1, const2);   // qn
mdoerr@9497 3975 vxor(VR0, VR0, VR1); // a - qn, subtraction is xor in GF(2)
mdoerr@9497 3976
mdoerr@9497 3977 /*
mdoerr@9497 3978 * Since we are bit reflected, the result (ie the low 32 bits) is in
mdoerr@9497 3979 * the high 32 bits. We just need to shift it left 4 bytes
mdoerr@9497 3980 * V0 [ 0 1 X 3 ]
mdoerr@9497 3981 * V0 [ 0 X 2 3 ]
mdoerr@9497 3982 */
mdoerr@9497 3983   vsldoi(VR0, VR0, zeroes, 4);    // shift result into top 64 bits
mdoerr@9497 3984
mdoerr@9497 3985   // Get the result into the crc register.
mdoerr@9497 3986 mfvrd(crc, VR0);
mdoerr@9497 3987
mdoerr@9497 3988 BIND(L_end);
mdoerr@9497 3989
mdoerr@9497 3990 offsetInt = 0;
mdoerr@9497 3991 // Restore non-volatile Vector registers (frameless).
mdoerr@9497 3992 offsetInt -= 16; li(offset, -16); lvx(VR20, offset, R1_SP);
mdoerr@9497 3993 offsetInt -= 16; addi(offset, offset, -16); lvx(VR21, offset, R1_SP);
mdoerr@9497 3994 offsetInt -= 16; addi(offset, offset, -16); lvx(VR22, offset, R1_SP);
mdoerr@9497 3995 offsetInt -= 16; addi(offset, offset, -16); lvx(VR23, offset, R1_SP);
mdoerr@9497 3996 offsetInt -= 16; addi(offset, offset, -16); lvx(VR24, offset, R1_SP);
mdoerr@9497 3997 offsetInt -= 16; addi(offset, offset, -16); lvx(VR25, offset, R1_SP);
mdoerr@9497 3998 offsetInt -= 16; addi(offset, offset, -16); lvx(VR26, offset, R1_SP);
mdoerr@9497 3999 offsetInt -= 16; addi(offset, offset, -16); lvx(VR27, offset, R1_SP);
mdoerr@9497 4000 offsetInt -= 16; addi(offset, offset, -16); lvx(VR28, offset, R1_SP);
mdoerr@9497 4001 offsetInt -= 8; ld(R22, offsetInt, R1_SP);
mdoerr@9497 4002 offsetInt -= 8; ld(R23, offsetInt, R1_SP);
mdoerr@9497 4003 offsetInt -= 8; ld(R24, offsetInt, R1_SP);
mdoerr@9497 4004 offsetInt -= 8; ld(R25, offsetInt, R1_SP);
mdoerr@9497 4005 offsetInt -= 8; ld(R26, offsetInt, R1_SP);
mdoerr@9497 4006 offsetInt -= 8; ld(R27, offsetInt, R1_SP);
mdoerr@9497 4007 offsetInt -= 8; ld(R28, offsetInt, R1_SP);
mdoerr@9497 4008 offsetInt -= 8; ld(R29, offsetInt, R1_SP);
mdoerr@9497 4009 offsetInt -= 8; ld(R30, offsetInt, R1_SP);
mdoerr@9497 4010 offsetInt -= 8; ld(R31, offsetInt, R1_SP);
mdoerr@9497 4011 }
mdoerr@9497 4012
gromero@9496 4013 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
gromero@9496 4014 assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
gromero@9496 4015
gromero@9496 4016 BLOCK_COMMENT("kernel_crc32_singleByte:");
gromero@9496 4017 nand(crc, crc, crc); // ~c
gromero@9496 4018
gromero@9496 4019 lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
gromero@9496 4020 update_byte_crc32(crc, tmp, table);
gromero@9496 4021
gromero@9496 4022 nand(crc, crc, crc); // ~c
gromero@9496 4023 }
gromero@9496 4024
goetz@6458 4025
goetz@6458 4026 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
goetz@6458 4027 #ifdef ASSERT
goetz@6458 4028 Label ok;
goetz@6458 4029 if (check_equal) {
goetz@6458 4030 beq(CCR0, ok);
goetz@6458 4031 } else {
goetz@6458 4032 bne(CCR0, ok);
goetz@6458 4033 }
goetz@6458 4034 stop(msg, id);
goetz@6458 4035 bind(ok);
goetz@6458 4036 #endif
goetz@6458 4037 }
goetz@6458 4038
goetz@6458 4039 void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
goetz@6458 4040 Register mem_base, const char* msg, int id) {
goetz@6458 4041 #ifdef ASSERT
goetz@6458 4042 switch (size) {
goetz@6458 4043 case 4:
goetz@6458 4044 lwz(R0, mem_offset, mem_base);
goetz@6458 4045 cmpwi(CCR0, R0, 0);
goetz@6458 4046 break;
goetz@6458 4047 case 8:
goetz@6458 4048 ld(R0, mem_offset, mem_base);
goetz@6458 4049 cmpdi(CCR0, R0, 0);
goetz@6458 4050 break;
goetz@6458 4051 default:
goetz@6458 4052 ShouldNotReachHere();
goetz@6458 4053 }
goetz@6458 4054 asm_assert(check_equal, msg, id);
goetz@6458 4055 #endif // ASSERT
goetz@6458 4056 }
goetz@6458 4057
goetz@6458 4058 void MacroAssembler::verify_thread() {
goetz@6458 4059 if (VerifyThread) {
goetz@6458 4060 unimplemented("'VerifyThread' currently not implemented on PPC");
goetz@6458 4061 }
goetz@6458 4062 }
goetz@6458 4063
goetz@6458 4064 // READ: oop. KILL: R0 (and possibly volatile floating point registers).
goetz@6458 4065 void MacroAssembler::verify_oop(Register oop, const char* msg) {
goetz@6458 4066 if (!VerifyOops) {
goetz@6458 4067 return;
goetz@6458 4068 }
goetz@7424 4069
goetz@6495 4070 address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
goetz@7424 4071 const Register tmp = R11; // Will be preserved.
goetz@7424 4072 const int nbytes_save = 11*8; // Volatile gprs except R0.
goetz@7424 4073 save_volatile_gprs(R1_SP, -nbytes_save); // except R0
goetz@7424 4074
goetz@7424 4075 if (oop == tmp) mr(R4_ARG2, oop);
goetz@7424 4076 save_LR_CR(tmp); // save in old frame
goetz@6511 4077 push_frame_reg_args(nbytes_save, tmp);
goetz@6511 4078 // load FunctionDescriptor** / entry_address *
goetz@7424 4079 load_const_optimized(tmp, fd, R0);
goetz@6511 4080 // load FunctionDescriptor* / entry_address
goetz@6458 4081 ld(tmp, 0, tmp);
goetz@7424 4082 if (oop != tmp) mr_if_needed(R4_ARG2, oop);
goetz@7424 4083 load_const_optimized(R3_ARG1, (address)msg, R0);
goetz@7424 4084 // Call destination for its side effect.
goetz@6458 4085 call_c(tmp);
goetz@7424 4086
goetz@6458 4087 pop_frame();
goetz@6458 4088 restore_LR_CR(tmp);
goetz@7424 4089 restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
goetz@6458 4090 }
goetz@6458 4091
goetz@6458 4092 const char* stop_types[] = {
goetz@6458 4093 "stop",
goetz@6458 4094 "untested",
goetz@6458 4095 "unimplemented",
goetz@6458 4096 "shouldnotreachhere"
goetz@6458 4097 };
goetz@6458 4098
goetz@6458 4099 static void stop_on_request(int tp, const char* msg) {
coleenp@7358 4100 tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
goetz@6458 4101 guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
goetz@6458 4102 }
goetz@6458 4103
goetz@6458 4104 // Call a C-function that prints output.
goetz@6458 4105 void MacroAssembler::stop(int type, const char* msg, int id) {
goetz@6458 4106 #ifndef PRODUCT
goetz@6458 4107 block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
goetz@6458 4108 #else
goetz@6458 4109 block_comment("stop {");
goetz@6458 4110 #endif
goetz@6458 4111
goetz@6458 4112 // setup arguments
goetz@6458 4113 load_const_optimized(R3_ARG1, type);
goetz@6458 4114 load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
goetz@6458 4115 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
goetz@6458 4116 illtrap();
goetz@6458 4117 emit_int32(id);
goetz@6458 4118 block_comment("} stop;");
goetz@6458 4119 }
goetz@6458 4120
goetz@6458 4121 #ifndef PRODUCT
goetz@6458 4122 // Write pattern 0x0101010101010101 in memory region [low-before, high+after].
goetz@6458 4123 // Val, addr are temp registers.
goetz@6458 4124 // If low == addr, addr is killed.
goetz@6458 4125 // High is preserved.
goetz@6458 4126 void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
goetz@6458 4127 if (!ZapMemory) return;
goetz@6458 4128
goetz@6458 4129 assert_different_registers(low, val);
goetz@6458 4130
goetz@6458 4131 BLOCK_COMMENT("zap memory region {");
goetz@6458 4132 load_const_optimized(val, 0x0101010101010101);
goetz@6458 4133 int size = before + after;
goetz@6458 4134 if (low == high && size < 5 && size > 0) {
goetz@6458 4135 int offset = -before*BytesPerWord;
goetz@6458 4136 for (int i = 0; i < size; ++i) {
goetz@6458 4137 std(val, offset, low);
goetz@6458 4138 offset += (1*BytesPerWord);
goetz@6458 4139 }
goetz@6458 4140 } else {
goetz@6458 4141 addi(addr, low, -before*BytesPerWord);
goetz@6458 4142 assert_different_registers(high, val);
goetz@6458 4143 if (after) addi(high, high, after * BytesPerWord);
goetz@6458 4144 Label loop;
goetz@6458 4145 bind(loop);
goetz@6458 4146 std(val, 0, addr);
goetz@6458 4147 addi(addr, addr, 8);
goetz@6458 4148 cmpd(CCR6, addr, high);
goetz@6458 4149 ble(CCR6, loop);
goetz@6458 4150 if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value.
goetz@6458 4151 }
goetz@6458 4152 BLOCK_COMMENT("} zap memory region");
goetz@6458 4153 }
goetz@6458 4154
goetz@6458 4155 #endif // !PRODUCT
goetz@6512 4156
goetz@6512 4157 SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
goetz@6512 4158 int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
goetz@6512 4159 assert(sizeof(bool) == 1, "PowerPC ABI");
goetz@6512 4160 masm->lbz(temp, simm16_offset, temp);
goetz@6512 4161 masm->cmpwi(CCR0, temp, 0);
goetz@6512 4162 masm->beq(CCR0, _label);
goetz@6512 4163 }
goetz@6512 4164
goetz@6512 4165 SkipIfEqualZero::~SkipIfEqualZero() {
goetz@6512 4166 _masm->bind(_label);
goetz@6512 4167 }
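
// Usage sketch (hypothetical flag and scratch register):
//   {
//     SkipIfEqualZero skip(masm, R11_scratch1, &SomeDiagnosticFlag);
//     // ... code emitted here is executed only when SomeDiagnosticFlag is true ...
//   } // The destructor binds the label, i.e. the skip target.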
