src/cpu/ppc/vm/macroAssembler_ppc.cpp

changeset 6458:ec28f9c041ff
child 6463:7687c56b6693
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Aug 02 16:46:45 2013 +0200
     1.3 @@ -0,0 +1,3017 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright 2012, 2013 SAP AG. All rights reserved.
     1.7 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8 + *
     1.9 + * This code is free software; you can redistribute it and/or modify it
    1.10 + * under the terms of the GNU General Public License version 2 only, as
    1.11 + * published by the Free Software Foundation.
    1.12 + *
    1.13 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.16 + * version 2 for more details (a copy is included in the LICENSE file that
    1.17 + * accompanied this code).
    1.18 + *
    1.19 + * You should have received a copy of the GNU General Public License version
    1.20 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.21 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.22 + *
    1.23 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.24 + * or visit www.oracle.com if you need additional information or have any
    1.25 + * questions.
    1.26 + *
    1.27 + */
    1.28 +
    1.29 +#include "precompiled.hpp"
    1.30 +#include "asm/assembler.hpp"
    1.31 +#include "asm/assembler.inline.hpp"
    1.32 +#include "asm/macroAssembler.inline.hpp"
    1.33 +#include "compiler/disassembler.hpp"
    1.34 +#include "gc_interface/collectedHeap.inline.hpp"
    1.35 +#include "interpreter/interpreter.hpp"
    1.36 +#include "memory/cardTableModRefBS.hpp"
    1.37 +#include "memory/resourceArea.hpp"
    1.38 +#include "prims/methodHandles.hpp"
    1.39 +#include "runtime/biasedLocking.hpp"
    1.40 +#include "runtime/interfaceSupport.hpp"
    1.41 +#include "runtime/objectMonitor.hpp"
    1.42 +#include "runtime/os.hpp"
    1.43 +#include "runtime/sharedRuntime.hpp"
    1.44 +#include "runtime/stubRoutines.hpp"
    1.45 +#include "utilities/macros.hpp"
    1.46 +#if INCLUDE_ALL_GCS
    1.47 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.48 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    1.49 +#include "gc_implementation/g1/heapRegion.hpp"
    1.50 +#endif // INCLUDE_ALL_GCS
    1.51 +
    1.52 +#ifdef PRODUCT
    1.53 +#define BLOCK_COMMENT(str) // nothing
    1.54 +#else
    1.55 +#define BLOCK_COMMENT(str) block_comment(str)
    1.56 +#endif
    1.57 +
    1.58 +#ifdef ASSERT
    1.59 +// On RISC, there's no benefit to verifying instruction boundaries.
    1.60 +bool AbstractAssembler::pd_check_instruction_mark() { return false; }
    1.61 +#endif
    1.62 +
    1.63 +void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
    1.64 +  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
    1.65 +  if (Assembler::is_simm(si31, 16)) {
    1.66 +    ld(d, si31, a);
    1.67 +    if (emit_filler_nop) nop();
    1.68 +  } else {
    1.69 +    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    1.70 +    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    1.71 +    addis(d, a, hi);
    1.72 +    ld(d, lo, d);
    1.73 +  }
    1.74 +}
    1.75 +
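The addis/ld pair above works because the ld displacement is sign-extended: whenever bit 15 of the offset is set, the high half must absorb a carry of 1. A minimal self-contained sketch of that split (split_hi/split_lo are hypothetical stand-ins for largeoffset_si16_si16_hi/_lo, which are defined elsewhere):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for MacroAssembler::largeoffset_si16_si16_hi/_lo:
// split a non-negative 31-bit offset into an addis immediate (hi) and a
// sign-extended 16-bit ld displacement (lo).
static int32_t split_hi(int32_t si31) { return (si31 >> 16) + ((si31 & 0x8000) ? 1 : 0); }
static int16_t split_lo(int32_t si31) { return (int16_t)(si31 & 0xffff); }

int main() {
  for (int32_t off : {0, 42, 0x12345, 0x18000, 0x3ffffffc}) {
    // addis contributes (hi << 16); the ld then adds the sign-extended lo.
    int64_t reassembled = ((int64_t)split_hi(off) << 16) + split_lo(off);
    assert(reassembled == off); // the carry in hi compensates for lo's sign bit
  }
  return 0;
}
```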
    1.76 +void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
    1.77 +  assert_different_registers(d, a);
    1.78 +  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
    1.79 +}
    1.80 +
    1.81 +void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
    1.82 +                                      size_t size_in_bytes, bool is_signed) {
    1.83 +  switch (size_in_bytes) {
    1.84 +  case  8:              ld(dst, offs, base);                         break;
    1.85 +  case  4:  is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
    1.86 +  case  2:  is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
    1.87 +  case  1:  lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
    1.88 +  default:  ShouldNotReachHere();
    1.89 +  }
    1.90 +}
    1.91 +
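The byte case needs two instructions because PPC has no sign-extending byte load: lbz zero-extends, and extsb then sign-extends the register. The same arithmetic, modeled in plain C++:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint8_t mem = 0x80;                              // -128 as a signed byte
  uint64_t zero_extended = mem;                    // what lbz leaves in the register
  int64_t  sign_extended = (int8_t)zero_extended;  // what extsb makes of it
  assert(zero_extended == 0x80 && sign_extended == -128);
  return 0;
}
```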
    1.92 +void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
    1.93 +                                       size_t size_in_bytes) {
    1.94 +  switch (size_in_bytes) {
    1.95 +  case  8:  std(dst, offs, base); break;
    1.96 +  case  4:  stw(dst, offs, base); break;
    1.97 +  case  2:  sth(dst, offs, base); break;
    1.98 +  case  1:  stb(dst, offs, base); break;
    1.99 +  default:  ShouldNotReachHere();
   1.100 +  }
   1.101 +}
   1.102 +
   1.103 +void MacroAssembler::align(int modulus) {
   1.104 +  while (offset() % modulus != 0) nop();
   1.105 +}
   1.106 +
   1.107 +// Issue instructions that calculate given TOC from global TOC.
   1.108 +void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
   1.109 +                                                       bool add_relocation, bool emit_dummy_addr) {
   1.110 +  int offset = -1;
   1.111 +  if (emit_dummy_addr) {
   1.112 +    offset = -128; // dummy address
   1.113 +  } else if (addr != (address)(intptr_t)-1) {
   1.114 +    offset = MacroAssembler::offset_to_global_toc(addr);
   1.115 +  }
   1.116 +
   1.117 +  if (hi16) {
   1.118 +    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
   1.119 +  }
   1.120 +  if (lo16) {
   1.121 +    if (add_relocation) {
   1.122 +      // Relocate at the addi to avoid confusion with a load from the method's TOC.
   1.123 +      relocate(internal_word_Relocation::spec(addr));
   1.124 +    }
   1.125 +    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
   1.126 +  }
   1.127 +}
   1.128 +
   1.129 +int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
   1.130 +  const int offset = MacroAssembler::offset_to_global_toc(addr);
   1.131 +
   1.132 +  const address inst2_addr = a;
   1.133 +  const int inst2 = *(int *)inst2_addr;
   1.134 +
   1.135 +  // The relocation points to the second instruction, the addi,
   1.136 +  // and the addi reads and writes the same register dst.
   1.137 +  const int dst = inv_rt_field(inst2);
   1.138 +  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
   1.139 +
   1.140 +  // Now, find the preceding addis which writes to dst.
   1.141 +  int inst1 = 0;
   1.142 +  address inst1_addr = inst2_addr - BytesPerInstWord;
   1.143 +  while (inst1_addr >= bound) {
   1.144 +    inst1 = *(int *) inst1_addr;
   1.145 +    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
   1.146 +      // Stop, found the addis which writes dst.
   1.147 +      break;
   1.148 +    }
   1.149 +    inst1_addr -= BytesPerInstWord;
   1.150 +  }
   1.151 +
   1.152 +  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
   1.153 +  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
   1.154 +  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
   1.155 +  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
   1.156 +}
   1.157 +
   1.158 +address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
   1.159 +  const address inst2_addr = a;
   1.160 +  const int inst2 = *(int *)inst2_addr;
   1.161 +
   1.162 +  // The relocation points to the second instruction, the addi,
   1.163 +  // and the addi reads and writes the same register dst.
   1.164 +  const int dst = inv_rt_field(inst2);
   1.165 +  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
   1.166 +
   1.167 +  // Now, find the preceding addis which writes to dst.
   1.168 +  int inst1 = 0;
   1.169 +  address inst1_addr = inst2_addr - BytesPerInstWord;
   1.170 +  while (inst1_addr >= bound) {
   1.171 +    inst1 = *(int *) inst1_addr;
   1.172 +    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
   1.173 +      // stop, found the addis which writes dst
   1.174 +      break;
   1.175 +    }
   1.176 +    inst1_addr -= BytesPerInstWord;
   1.177 +  }
   1.178 +
   1.179 +  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
   1.180 +
   1.181 +  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
   1.182 +  // -1 is a special case
   1.183 +  if (offset == -1) {
   1.184 +    return (address)(intptr_t)-1;
   1.185 +  } else {
   1.186 +    return global_toc() + offset;
   1.187 +  }
   1.188 +}
   1.189 +
   1.190 +#ifdef _LP64
   1.191 +// Patch compressed oops or klass constants.
   1.192 +int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
   1.193 +  assert(UseCompressedOops, "Should only patch compressed oops");
   1.194 +
   1.195 +  const address inst2_addr = a;
   1.196 +  const int inst2 = *(int *)inst2_addr;
   1.197 +
   1.198 +  // The relocation points to the second instruction, the addi,
   1.199 +  // and the addi reads and writes the same register dst.
   1.200 +  const int dst = inv_rt_field(inst2);
   1.201 +  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
   1.202 +  // Now, find the preceding addis which writes to dst.
   1.203 +  int inst1 = 0;
   1.204 +  address inst1_addr = inst2_addr - BytesPerInstWord;
   1.205 +  bool inst1_found = false;
   1.206 +  while (inst1_addr >= bound) {
   1.207 +    inst1 = *(int *)inst1_addr;
   1.208 +    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
   1.209 +    inst1_addr -= BytesPerInstWord;
   1.210 +  }
   1.211 +  assert(inst1_found, "inst is not lis");
   1.212 +
   1.213 +  int xc = (data >> 16) & 0xffff;
   1.214 +  int xd = (data >>  0) & 0xffff;
   1.215 +
   1.216 +  set_imm((int *)inst1_addr, (short)(xc + ((xd & 0x8000) != 0 ? 1 : 0))); // see enc_load_con_narrow1/2
   1.217 +  set_imm((int *)inst2_addr, (short)(xd));
   1.218 +  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
   1.219 +}
   1.220 +
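Because the second instruction is an addi, its 16-bit immediate is sign-extended at runtime; the +1 added to the high half above compensates, and get_narrow_oop below undoes it. A round-trip sketch of that immediate math (encode/decode are illustrative helpers, not HotSpot API):

```cpp
#include <cassert>
#include <cstdint>

// Encode a 32-bit value into lis/addi immediates: addi sign-extends, so
// the lis half carries +1 whenever bit 15 of the low half is set (the
// (int16_t) casts mirror the (short) casts in the patching code above).
static void encode(uint32_t data, int16_t& hi, int16_t& lo) {
  uint32_t xc = (data >> 16) & 0xffff, xd = data & 0xffff;
  hi = (int16_t)(xc + ((xd & 0x8000) ? 1 : 0));
  lo = (int16_t)xd;
}

// Decode, mirroring get_narrow_oop: subtract the carry back out.
static uint32_t decode(int16_t hi, int16_t lo) {
  uint32_t xl = (uint16_t)lo;
  uint32_t xh = (uint32_t)((((xl & 0x8000) ? -1 : 0) + hi) & 0xffff) << 16;
  return xl | xh;
}

int main() {
  for (uint32_t d : {0u, 0x1234u, 0x8000u, 0xdeadbeefu, 0xffffffffu}) {
    int16_t hi, lo;
    encode(d, hi, lo);
    assert(decode(hi, lo) == d);
  }
  return 0;
}
```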
   1.221 +// Get compressed oop or klass constant.
   1.222 +narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
   1.223 +  assert(UseCompressedOops, "Should only patch compressed oops");
   1.224 +
   1.225 +  const address inst2_addr = a;
   1.226 +  const int inst2 = *(int *)inst2_addr;
   1.227 +
   1.228 +  // The relocation points to the second instruction, the addi,
   1.229 +  // and the addi reads and writes the same register dst.
   1.230 +  const int dst = inv_rt_field(inst2);
   1.231 +  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
   1.232 +  // Now, find the preceding lis which writes to dst.
   1.233 +  int inst1 = 0;
   1.234 +  address inst1_addr = inst2_addr - BytesPerInstWord;
   1.235 +  bool inst1_found = false;
   1.236 +
   1.237 +  while (inst1_addr >= bound) {
   1.238 +    inst1 = *(int *) inst1_addr;
   1.239 +    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
   1.240 +    inst1_addr -= BytesPerInstWord;
   1.241 +  }
   1.242 +  assert(inst1_found, "inst is not lis");
   1.243 +
   1.244 +  uint xl = ((unsigned int) (get_imm(inst2_addr,0) & 0xffff));
   1.245 +  uint xh = (((((xl & 0x8000) != 0 ? -1 : 0) + get_imm(inst1_addr,0)) & 0xffff) << 16);
   1.246 +  return (int) (xl | xh);
   1.247 +}
   1.248 +#endif // _LP64
   1.249 +
   1.250 +void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
   1.251 +  int toc_offset = 0;
   1.252 +  // Use RelocationHolder::none for the constant pool entry, otherwise
   1.253 +  // we will end up with a failing NativeCall::verify(x) where x is
   1.254 +  // the address of the constant pool entry.
   1.255 +  // FIXME: We should insert relocation information for oops at the constant
   1.256 +  // pool entries instead of inserting it at the loads; patching of a constant
   1.257 +  // pool entry should be less expensive.
   1.258 +  Unimplemented();
   1.259 +  if (false) {
   1.260 +    address oop_address = address_constant((address)a.value(), RelocationHolder::none);
   1.261 +    // Relocate at the pc of the load.
   1.262 +    relocate(a.rspec());
   1.263 +    toc_offset = (int)(oop_address - code()->consts()->start());
   1.264 +  }
   1.265 +  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
   1.266 +}
   1.267 +
   1.268 +bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
   1.269 +  const address inst1_addr = a;
   1.270 +  const int inst1 = *(int *)inst1_addr;
   1.271 +
   1.272 +  // The relocation points to the ld or the addis.
   1.273 +  return (is_ld(inst1)) ||
   1.274 +         (is_addis(inst1) && inv_ra_field(inst1) != 0);
   1.275 +}
   1.276 +
   1.277 +int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
   1.278 +  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");
   1.279 +
   1.280 +  const address inst1_addr = a;
   1.281 +  const int inst1 = *(int *)inst1_addr;
   1.282 +
   1.283 +  if (is_ld(inst1)) {
   1.284 +    return inv_d1_field(inst1);
   1.285 +  } else if (is_addis(inst1)) {
   1.286 +    const int dst = inv_rt_field(inst1);
   1.287 +
   1.288 +    // Now, find the succeeding ld which reads and writes to dst.
   1.289 +    address inst2_addr = inst1_addr + BytesPerInstWord;
   1.290 +    int inst2 = 0;
   1.291 +    while (true) {
   1.292 +      inst2 = *(int *) inst2_addr;
   1.293 +      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
   1.294 +        // Stop, found the ld which reads and writes dst.
   1.295 +        break;
   1.296 +      }
   1.297 +      inst2_addr += BytesPerInstWord;
   1.298 +    }
   1.299 +    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
   1.300 +  }
   1.301 +  ShouldNotReachHere();
   1.302 +  return 0;
   1.303 +}
   1.304 +
   1.305 +// Get the constant from a `load_const' sequence.
   1.306 +long MacroAssembler::get_const(address a) {
   1.307 +  assert(is_load_const_at(a), "not a load of a constant");
   1.308 +  const int *p = (const int*) a;
   1.309 +  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
   1.310 +  if (is_ori(*(p+1))) {
   1.311 +    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
   1.312 +    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
   1.313 +    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
   1.314 +  } else if (is_lis(*(p+1))) {
   1.315 +    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
   1.316 +    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
   1.317 +    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
   1.318 +  } else {
   1.319 +    ShouldNotReachHere();
   1.320 +    return (long) 0;
   1.321 +  }
   1.322 +  return (long) x;
   1.323 +}
   1.324 +
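A sketch of the reassembly get_const performs for the ori variant: four 16-bit immediates at word indices 0, 1, 3 and 4 (index 2 is presumably the rldicr shift, which carries no 16-bit immediate) merge into one 64-bit value. Illustrative only; get_imm and the exact emitted sequence live elsewhere:

```cpp
#include <cassert>
#include <cstdint>

// Merge the four 16-bit immediates of the ori-variant load_const sequence
// (word indices 0, 1, 3, 4 as in get_const above) into a 64-bit constant.
static uint64_t reassemble(const uint16_t imm[5]) {
  return ((uint64_t)imm[0] << 48) | ((uint64_t)imm[1] << 32)
       | ((uint64_t)imm[3] << 16) |  (uint64_t)imm[4];
}

int main() {
  // Index 2 is the shift instruction and contributes no immediate.
  uint16_t imm[5] = {0x1122, 0x3344, 0, 0x5566, 0x7788};
  assert(reassemble(imm) == 0x1122334455667788ULL);
  return 0;
}
```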
   1.325 +// Patch the 64-bit constant of a `load_const' sequence. This is a
   1.326 +// low-level procedure; it neither flushes the instruction cache nor
   1.327 +// is it MT-safe.
   1.328 +void MacroAssembler::patch_const(address a, long x) {
   1.329 +  assert(is_load_const_at(a), "not a load of a constant");
   1.330 +  int *p = (int*) a;
   1.331 +  if (is_ori(*(p+1))) {
   1.332 +    set_imm(0 + p, (x >> 48) & 0xffff);
   1.333 +    set_imm(1 + p, (x >> 32) & 0xffff);
   1.334 +    set_imm(3 + p, (x >> 16) & 0xffff);
   1.335 +    set_imm(4 + p, x & 0xffff);
   1.336 +  } else if (is_lis(*(p+1))) {
   1.337 +    set_imm(0 + p, (x >> 48) & 0xffff);
   1.338 +    set_imm(2 + p, (x >> 32) & 0xffff);
   1.339 +    set_imm(1 + p, (x >> 16) & 0xffff);
   1.340 +    set_imm(3 + p, x & 0xffff);
   1.341 +  } else {
   1.342 +    ShouldNotReachHere();
   1.343 +  }
   1.344 +}
   1.345 +
   1.346 +AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
   1.347 +  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
   1.348 +  int index = oop_recorder()->allocate_metadata_index(obj);
   1.349 +  RelocationHolder rspec = metadata_Relocation::spec(index);
   1.350 +  return AddressLiteral((address)obj, rspec);
   1.351 +}
   1.352 +
   1.353 +AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
   1.354 +  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
   1.355 +  int index = oop_recorder()->find_index(obj);
   1.356 +  RelocationHolder rspec = metadata_Relocation::spec(index);
   1.357 +  return AddressLiteral((address)obj, rspec);
   1.358 +}
   1.359 +
   1.360 +AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
   1.361 +  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   1.362 +  int oop_index = oop_recorder()->allocate_oop_index(obj);
   1.363 +  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
   1.364 +}
   1.365 +
   1.366 +AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
   1.367 +  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   1.368 +  int oop_index = oop_recorder()->find_index(obj);
   1.369 +  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
   1.370 +}
   1.371 +
   1.372 +RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
   1.373 +                                                      Register tmp, int offset) {
   1.374 +  intptr_t value = *delayed_value_addr;
   1.375 +  if (value != 0) {
   1.376 +    return RegisterOrConstant(value + offset);
   1.377 +  }
   1.378 +
   1.379 +  // Load indirectly to solve generation ordering problem.
   1.380 +  // static address, no relocation
   1.381 +  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
   1.382 +  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)
   1.383 +
   1.384 +  if (offset != 0) {
   1.385 +    addi(tmp, tmp, offset);
   1.386 +  }
   1.387 +
   1.388 +  return RegisterOrConstant(tmp);
   1.389 +}
   1.390 +
   1.391 +#ifndef PRODUCT
   1.392 +void MacroAssembler::pd_print_patched_instruction(address branch) {
   1.393 +  Unimplemented(); // TODO: PPC port
   1.394 +}
   1.395 +#endif // ndef PRODUCT
   1.396 +
   1.397 +// Conditional far branch for destinations encodable in 24+2 bits.
   1.398 +void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {
   1.399 +
   1.400 +  // If requested by the `optimize' flag, relocate the bc_far as a
   1.401 +  // runtime_call and prepare for optimizing it when the code gets
   1.402 +  // relocated.
   1.403 +  if (optimize == bc_far_optimize_on_relocate) {
   1.404 +    relocate(relocInfo::runtime_call_type);
   1.405 +  }
   1.406 +
   1.407 +  // variant 2:
   1.408 +  //
   1.409 +  //    b!cxx SKIP
   1.410 +  //    bxx   DEST
   1.411 +  //  SKIP:
   1.412 +  //
   1.413 +
   1.414 +  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
   1.415 +                                                opposite_bcond(inv_boint_bcond(boint)));
   1.416 +
   1.417 +  // We emit two branches.
   1.418 +  // First, a conditional branch which jumps around the far branch.
   1.419 +  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
   1.420 +  const address bc_pc        = pc();
   1.421 +  bc(opposite_boint, biint, not_taken_pc);
   1.422 +
   1.423 +  const int bc_instr = *(int*)bc_pc;
   1.424 +  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
   1.425 +  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
   1.426 +  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
   1.427 +                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
   1.428 +         "postcondition");
   1.429 +  assert(biint == inv_bi_field(bc_instr), "postcondition");
   1.430 +
   1.431 +  // Second, an unconditional far branch which jumps to dest.
   1.432 +  // Note: target(dest) remembers the current pc (see CodeSection::target)
   1.433 +  //       and returns the current pc if the label is not bound yet; when
   1.434 +  //       the label gets bound, the unconditional far branch will be patched.
   1.435 +  const address target_pc = target(dest);
   1.436 +  const address b_pc  = pc();
   1.437 +  b(target_pc);
   1.438 +
   1.439 +  assert(not_taken_pc == pc(),                     "postcondition");
   1.440 +  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
   1.441 +}
   1.442 +
   1.443 +bool MacroAssembler::is_bc_far_at(address instruction_addr) {
   1.444 +  return is_bc_far_variant1_at(instruction_addr) ||
   1.445 +         is_bc_far_variant2_at(instruction_addr) ||
   1.446 +         is_bc_far_variant3_at(instruction_addr);
   1.447 +}
   1.448 +
   1.449 +address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
   1.450 +  if (is_bc_far_variant1_at(instruction_addr)) {
   1.451 +    const address instruction_1_addr = instruction_addr;
   1.452 +    const int instruction_1 = *(int*)instruction_1_addr;
   1.453 +    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
   1.454 +  } else if (is_bc_far_variant2_at(instruction_addr)) {
   1.455 +    const address instruction_2_addr = instruction_addr + 4;
   1.456 +    return bxx_destination(instruction_2_addr);
   1.457 +  } else if (is_bc_far_variant3_at(instruction_addr)) {
   1.458 +    return instruction_addr + 8;
   1.459 +  }
   1.460 +  // variant 4 ???
   1.461 +  ShouldNotReachHere();
   1.462 +  return NULL;
   1.463 +}
   1.464 +
   1.465 +void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {
   1.466 +  if (is_bc_far_variant3_at(instruction_addr)) {
   1.467 +    // variant 3, far cond branch to the next instruction, already patched to nops:
   1.468 +    //
   1.469 +    //    nop
   1.470 +    //    endgroup
   1.471 +    //  SKIP/DEST:
   1.472 +    //
   1.473 +    return;
   1.474 +  }
   1.475 +
   1.476 +  // first, extract boint and biint from the current branch
   1.477 +  int boint = 0;
   1.478 +  int biint = 0;
   1.479 +
   1.480 +  ResourceMark rm;
   1.481 +  const int code_size = 2 * BytesPerInstWord;
   1.482 +  CodeBuffer buf(instruction_addr, code_size);
   1.483 +  MacroAssembler masm(&buf);
   1.484 +  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
   1.485 +    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
   1.486 +    masm.nop();
   1.487 +    masm.endgroup();
   1.488 +  } else {
   1.489 +    if (is_bc_far_variant1_at(instruction_addr)) {
   1.490 +      // variant 1, the 1st instruction contains the destination address:
   1.491 +      //
   1.492 +      //    bcxx  DEST
   1.493 +      //    endgroup
   1.494 +      //
   1.495 +      const int instruction_1 = *(int*)(instruction_addr);
   1.496 +      boint = inv_bo_field(instruction_1);
   1.497 +      biint = inv_bi_field(instruction_1);
   1.498 +    } else if (is_bc_far_variant2_at(instruction_addr)) {
   1.499 +      // variant 2, the 2nd instruction contains the destination address:
   1.500 +      //
   1.501 +      //    b!cxx SKIP
   1.502 +      //    bxx   DEST
   1.503 +      //  SKIP:
   1.504 +      //
   1.505 +      const int instruction_1 = *(int*)(instruction_addr);
   1.506 +      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
   1.507 +          opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
   1.508 +      biint = inv_bi_field(instruction_1);
   1.509 +    } else {
   1.510 +      // variant 4???
   1.511 +      ShouldNotReachHere();
   1.512 +    }
   1.513 +
   1.514 +    // second, set the new branch destination and optimize the code
   1.515 +    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
   1.516 +        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
   1.517 +      // variant 1:
   1.518 +      //
   1.519 +      //    bcxx  DEST
   1.520 +      //    endgroup
   1.521 +      //
   1.522 +      masm.bc(boint, biint, dest);
   1.523 +      masm.endgroup();
   1.524 +    } else {
   1.525 +      // variant 2:
   1.526 +      //
   1.527 +      //    b!cxx SKIP
   1.528 +      //    bxx   DEST
   1.529 +      //  SKIP:
   1.530 +      //
   1.531 +      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
   1.532 +                                                    opposite_bcond(inv_boint_bcond(boint)));
   1.533 +      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
   1.534 +      masm.bc(opposite_boint, biint, not_taken_pc);
   1.535 +      masm.b(dest);
   1.536 +    }
   1.537 +  }
   1.538 +  ICache::invalidate_range(instruction_addr, code_size);
   1.539 +}
   1.540 +
   1.541 +// Emit a NOT mt-safe patchable 64-bit absolute call/jump.
   1.542 +void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
   1.543 +  // get current pc
   1.544 +  uint64_t start_pc = (uint64_t) pc();
   1.545 +
   1.546 +  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
   1.547 +  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first
   1.548 +
   1.549 +  // relocate here
   1.550 +  if (rt != relocInfo::none) {
   1.551 +    relocate(rt);
   1.552 +  }
   1.553 +
   1.554 +  if ( ReoptimizeCallSequences &&
   1.555 +       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
   1.556 +        (!link && is_within_range_of_b(dest, pc_of_b)))) {
   1.557 +    // variant 2:
   1.558 +    // Emit an optimized, pc-relative call/jump.
   1.559 +
   1.560 +    if (link) {
   1.561 +      // some padding
   1.562 +      nop();
   1.563 +      nop();
   1.564 +      nop();
   1.565 +      nop();
   1.566 +      nop();
   1.567 +      nop();
   1.568 +
   1.569 +      // do the call
   1.570 +      assert(pc() == pc_of_bl, "just checking");
   1.571 +      bl(dest, relocInfo::none);
   1.572 +    } else {
   1.573 +      // do the jump
   1.574 +      assert(pc() == pc_of_b, "just checking");
   1.575 +      b(dest, relocInfo::none);
   1.576 +
   1.577 +      // some padding
   1.578 +      nop();
   1.579 +      nop();
   1.580 +      nop();
   1.581 +      nop();
   1.582 +      nop();
   1.583 +      nop();
   1.584 +    }
   1.585 +
   1.586 +    // Assert that we can identify the emitted call/jump.
   1.587 +    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
   1.588 +           "can't identify emitted call");
   1.589 +  } else {
   1.590 +    // variant 1:
   1.591 +
   1.592 +    mr(R0, R11);  // spill R11 -> R0.
   1.593 +
   1.594 +    // Calculate the destination address relative to the global TOC
   1.595 +    // and load it into CTR.
   1.596 +    calculate_address_from_global_toc(R11, dest, true, true, false);
   1.597 +
   1.598 +    mtctr(R11);
   1.599 +    mr(R11, R0);  // restore R11 <- R0.
   1.600 +    nop();
   1.601 +
   1.602 +    // do the call/jump
   1.603 +    if (link) {
   1.604 +      bctrl();
   1.605 +    } else {
   1.606 +      bctr();
   1.607 +    }
   1.608 +    // Assert that we can identify the emitted call/jump.
   1.609 +    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
   1.610 +           "can't identify emitted call");
   1.611 +  }
   1.612 +
   1.613 +  // Assert that we can identify the emitted call/jump.
   1.614 +  assert(is_bxx64_patchable_at((address)start_pc, link),
   1.615 +         "can't identify emitted call");
   1.616 +  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
   1.617 +         "wrong encoding of dest address");
   1.618 +}
   1.619 +
   1.620 +// Identify a bxx64_patchable instruction.
   1.621 +bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
   1.622 +  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
   1.623 +    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
   1.624 +      || is_bxx64_patchable_variant2_at(instruction_addr, link);
   1.625 +}
   1.626 +
   1.627 +// Does the call64_patchable instruction use a pc-relative encoding of
   1.628 +// the call destination?
   1.629 +bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
   1.630 +  // variant 2 is pc-relative
   1.631 +  return is_bxx64_patchable_variant2_at(instruction_addr, link);
   1.632 +}
   1.633 +
   1.634 +// Identify variant 1.
   1.635 +bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
   1.636 +  unsigned int* instr = (unsigned int*) instruction_addr;
   1.637 +  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
   1.638 +    && is_mtctr(instr[5]) // mtctr
   1.639 +    && is_load_const_at(instruction_addr);
   1.640 +}
   1.641 +
   1.642 +// Identify variant 1b: load destination relative to global toc.
   1.643 +bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
   1.644 +  unsigned int* instr = (unsigned int*) instruction_addr;
   1.645 +  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
   1.646 +    && is_mtctr(instr[3]) // mtctr
   1.647 +    && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
   1.648 +}
   1.649 +
   1.650 +// Identify variant 2.
   1.651 +bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
   1.652 +  unsigned int* instr = (unsigned int*) instruction_addr;
   1.653 +  if (link) {
   1.654 +    return is_bl (instr[6])  // bl dest is last
   1.655 +      && is_nop(instr[0])  // nop
   1.656 +      && is_nop(instr[1])  // nop
   1.657 +      && is_nop(instr[2])  // nop
   1.658 +      && is_nop(instr[3])  // nop
   1.659 +      && is_nop(instr[4])  // nop
   1.660 +      && is_nop(instr[5]); // nop
   1.661 +  } else {
   1.662 +    return is_b  (instr[0])  // b  dest is first
   1.663 +      && is_nop(instr[1])  // nop
   1.664 +      && is_nop(instr[2])  // nop
   1.665 +      && is_nop(instr[3])  // nop
   1.666 +      && is_nop(instr[4])  // nop
   1.667 +      && is_nop(instr[5])  // nop
   1.668 +      && is_nop(instr[6]); // nop
   1.669 +  }
   1.670 +}
   1.671 +
   1.672 +// Set dest address of a bxx64_patchable instruction.
   1.673 +void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
   1.674 +  ResourceMark rm;
   1.675 +  int code_size = MacroAssembler::bxx64_patchable_size;
   1.676 +  CodeBuffer buf(instruction_addr, code_size);
   1.677 +  MacroAssembler masm(&buf);
   1.678 +  masm.bxx64_patchable(dest, relocInfo::none, link);
   1.679 +  ICache::invalidate_range(instruction_addr, code_size);
   1.680 +}
   1.681 +
   1.682 +// Get dest address of a bxx64_patchable instruction.
   1.683 +address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
   1.684 +  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
   1.685 +    return (address) (unsigned long) get_const(instruction_addr);
   1.686 +  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
   1.687 +    unsigned int* instr = (unsigned int*) instruction_addr;
   1.688 +    if (link) {
   1.689 +      const int instr_idx = 6; // bl is last
   1.690 +      int branchoffset = branch_destination(instr[instr_idx], 0);
   1.691 +      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
   1.692 +    } else {
   1.693 +      const int instr_idx = 0; // b is first
   1.694 +      int branchoffset = branch_destination(instr[instr_idx], 0);
   1.695 +      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
   1.696 +    }
   1.697 +  // Load dest relative to global toc.
   1.698 +  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
   1.699 +    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
   1.700 +                                                               instruction_addr);
   1.701 +  } else {
   1.702 +    ShouldNotReachHere();
   1.703 +    return NULL;
   1.704 +  }
   1.705 +}
   1.706 +
   1.707 +// Uses ordering which corresponds to ABI:
   1.708 +//    _savegpr0_14:  std  r14,-144(r1)
   1.709 +//    _savegpr0_15:  std  r15,-136(r1)
   1.710 +//    _savegpr0_16:  std  r16,-128(r1)
   1.711 +void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
   1.712 +  std(R14, offset, dst);   offset += 8;
   1.713 +  std(R15, offset, dst);   offset += 8;
   1.714 +  std(R16, offset, dst);   offset += 8;
   1.715 +  std(R17, offset, dst);   offset += 8;
   1.716 +  std(R18, offset, dst);   offset += 8;
   1.717 +  std(R19, offset, dst);   offset += 8;
   1.718 +  std(R20, offset, dst);   offset += 8;
   1.719 +  std(R21, offset, dst);   offset += 8;
   1.720 +  std(R22, offset, dst);   offset += 8;
   1.721 +  std(R23, offset, dst);   offset += 8;
   1.722 +  std(R24, offset, dst);   offset += 8;
   1.723 +  std(R25, offset, dst);   offset += 8;
   1.724 +  std(R26, offset, dst);   offset += 8;
   1.725 +  std(R27, offset, dst);   offset += 8;
   1.726 +  std(R28, offset, dst);   offset += 8;
   1.727 +  std(R29, offset, dst);   offset += 8;
   1.728 +  std(R30, offset, dst);   offset += 8;
   1.729 +  std(R31, offset, dst);   offset += 8;
   1.730 +
   1.731 +  stfd(F14, offset, dst);   offset += 8;
   1.732 +  stfd(F15, offset, dst);   offset += 8;
   1.733 +  stfd(F16, offset, dst);   offset += 8;
   1.734 +  stfd(F17, offset, dst);   offset += 8;
   1.735 +  stfd(F18, offset, dst);   offset += 8;
   1.736 +  stfd(F19, offset, dst);   offset += 8;
   1.737 +  stfd(F20, offset, dst);   offset += 8;
   1.738 +  stfd(F21, offset, dst);   offset += 8;
   1.739 +  stfd(F22, offset, dst);   offset += 8;
   1.740 +  stfd(F23, offset, dst);   offset += 8;
   1.741 +  stfd(F24, offset, dst);   offset += 8;
   1.742 +  stfd(F25, offset, dst);   offset += 8;
   1.743 +  stfd(F26, offset, dst);   offset += 8;
   1.744 +  stfd(F27, offset, dst);   offset += 8;
   1.745 +  stfd(F28, offset, dst);   offset += 8;
   1.746 +  stfd(F29, offset, dst);   offset += 8;
   1.747 +  stfd(F30, offset, dst);   offset += 8;
   1.748 +  stfd(F31, offset, dst);
   1.749 +}
   1.750 +
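The ABI ordering in the comment follows a simple rule: the _savegpr0_N millicode stores rN at 8*N - 256 bytes off r1, so r14 lands at -144 and r31 at -8; the loop above reproduces the same layout relative to `dst' and `offset'. A one-liner check of the formula:

```cpp
#include <cassert>

// Offset used by the ELF ABI _savegpr0_N/_restgpr0_N millicode for rN.
static int savegpr0_offset(int n) { return 8 * n - 256; }

int main() {
  assert(savegpr0_offset(14) == -144); // matches _savegpr0_14 above
  assert(savegpr0_offset(15) == -136); // matches _savegpr0_15
  assert(savegpr0_offset(31) == -8);   // last saved GPR sits just below r1
  return 0;
}
```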
   1.751 +// Uses ordering which corresponds to ABI:
   1.752 +//    _restgpr0_14:  ld   r14,-144(r1)
   1.753 +//    _restgpr0_15:  ld   r15,-136(r1)
   1.754 +//    _restgpr0_16:  ld   r16,-128(r1)
   1.755 +void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
   1.756 +  ld(R14, offset, src);   offset += 8;
   1.757 +  ld(R15, offset, src);   offset += 8;
   1.758 +  ld(R16, offset, src);   offset += 8;
   1.759 +  ld(R17, offset, src);   offset += 8;
   1.760 +  ld(R18, offset, src);   offset += 8;
   1.761 +  ld(R19, offset, src);   offset += 8;
   1.762 +  ld(R20, offset, src);   offset += 8;
   1.763 +  ld(R21, offset, src);   offset += 8;
   1.764 +  ld(R22, offset, src);   offset += 8;
   1.765 +  ld(R23, offset, src);   offset += 8;
   1.766 +  ld(R24, offset, src);   offset += 8;
   1.767 +  ld(R25, offset, src);   offset += 8;
   1.768 +  ld(R26, offset, src);   offset += 8;
   1.769 +  ld(R27, offset, src);   offset += 8;
   1.770 +  ld(R28, offset, src);   offset += 8;
   1.771 +  ld(R29, offset, src);   offset += 8;
   1.772 +  ld(R30, offset, src);   offset += 8;
   1.773 +  ld(R31, offset, src);   offset += 8;
   1.774 +
   1.775 +  // FP registers
   1.776 +  lfd(F14, offset, src);   offset += 8;
   1.777 +  lfd(F15, offset, src);   offset += 8;
   1.778 +  lfd(F16, offset, src);   offset += 8;
   1.779 +  lfd(F17, offset, src);   offset += 8;
   1.780 +  lfd(F18, offset, src);   offset += 8;
   1.781 +  lfd(F19, offset, src);   offset += 8;
   1.782 +  lfd(F20, offset, src);   offset += 8;
   1.783 +  lfd(F21, offset, src);   offset += 8;
   1.784 +  lfd(F22, offset, src);   offset += 8;
   1.785 +  lfd(F23, offset, src);   offset += 8;
   1.786 +  lfd(F24, offset, src);   offset += 8;
   1.787 +  lfd(F25, offset, src);   offset += 8;
   1.788 +  lfd(F26, offset, src);   offset += 8;
   1.789 +  lfd(F27, offset, src);   offset += 8;
   1.790 +  lfd(F28, offset, src);   offset += 8;
   1.791 +  lfd(F29, offset, src);   offset += 8;
   1.792 +  lfd(F30, offset, src);   offset += 8;
   1.793 +  lfd(F31, offset, src);
   1.794 +}
   1.795 +
   1.796 +// For verify_oops.
   1.797 +void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
   1.798 +  std(R3,  offset, dst);   offset += 8;
   1.799 +  std(R4,  offset, dst);   offset += 8;
   1.800 +  std(R5,  offset, dst);   offset += 8;
   1.801 +  std(R6,  offset, dst);   offset += 8;
   1.802 +  std(R7,  offset, dst);   offset += 8;
   1.803 +  std(R8,  offset, dst);   offset += 8;
   1.804 +  std(R9,  offset, dst);   offset += 8;
   1.805 +  std(R10, offset, dst);   offset += 8;
   1.806 +  std(R11, offset, dst);   offset += 8;
   1.807 +  std(R12, offset, dst);
   1.808 +}
   1.809 +
   1.810 +// For verify_oops.
   1.811 +void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
   1.812 +  ld(R3,  offset, src);   offset += 8;
   1.813 +  ld(R4,  offset, src);   offset += 8;
   1.814 +  ld(R5,  offset, src);   offset += 8;
   1.815 +  ld(R6,  offset, src);   offset += 8;
   1.816 +  ld(R7,  offset, src);   offset += 8;
   1.817 +  ld(R8,  offset, src);   offset += 8;
   1.818 +  ld(R9,  offset, src);   offset += 8;
   1.819 +  ld(R10, offset, src);   offset += 8;
   1.820 +  ld(R11, offset, src);   offset += 8;
   1.821 +  ld(R12, offset, src);
   1.822 +}
   1.823 +
   1.824 +void MacroAssembler::save_LR_CR(Register tmp) {
   1.825 +  mfcr(tmp);
   1.826 +  std(tmp, _abi(cr), R1_SP);
   1.827 +  mflr(tmp);
   1.828 +  std(tmp, _abi(lr), R1_SP);
   1.829 +  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
   1.830 +}
   1.831 +
   1.832 +void MacroAssembler::restore_LR_CR(Register tmp) {
   1.833 +  assert(tmp != R1_SP, "must be distinct");
   1.834 +  ld(tmp, _abi(lr), R1_SP);
   1.835 +  mtlr(tmp);
   1.836 +  ld(tmp, _abi(cr), R1_SP);
   1.837 +  mtcr(tmp);
   1.838 +}
   1.839 +
   1.840 +address MacroAssembler::get_PC_trash_LR(Register result) {
   1.841 +  Label L;
   1.842 +  bl(L);
   1.843 +  bind(L);
   1.844 +  address lr_pc = pc();
   1.845 +  mflr(result);
   1.846 +  return lr_pc;
   1.847 +}
   1.848 +
   1.849 +void MacroAssembler::resize_frame(Register offset, Register tmp) {
   1.850 +#ifdef ASSERT
   1.851 +  assert_different_registers(offset, tmp, R1_SP);
   1.852 +  andi_(tmp, offset, frame::alignment_in_bytes-1);
   1.853 +  asm_assert_eq("resize_frame: unaligned", 0x204);
   1.854 +#endif
   1.855 +
   1.856 +  // tmp <- *(SP)
   1.857 +  ld(tmp, _abi(callers_sp), R1_SP);
   1.858 +  // addr <- SP + offset;
   1.859 +  // *(addr) <- tmp;
   1.860 +  // SP <- addr
   1.861 +  stdux(tmp, R1_SP, offset);
   1.862 +}
   1.863 +
   1.864 +void MacroAssembler::resize_frame(int offset, Register tmp) {
   1.865 +  assert(is_simm(offset, 16), "too big an offset");
   1.866 +  assert_different_registers(tmp, R1_SP);
   1.867 +  assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
   1.868 +  // tmp <- *(SP)
   1.869 +  ld(tmp, _abi(callers_sp), R1_SP);
   1.870 +  // addr <- SP + offset;
   1.871 +  // *(addr) <- tmp;
   1.872 +  // SP <- addr
   1.873 +  stdu(tmp, offset, R1_SP);
   1.874 +}
   1.875 +
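Both resize_frame variants rely on the same trick: a single stdu/stdux writes the back link to the new stack top and updates R1_SP in one instruction, so an asynchronous stack walker never sees a frame without its back chain. A toy model of what the ld/stdux pair does (assuming the back link sits at offset 0 of the frame, as on PPC64):

```cpp
#include <cstdint>

struct ToyStack { uint64_t* sp; };

// ld tmp, 0(SP); stdux tmp, SP, offset:
//   EA = SP + offset; MEM[EA] = tmp; SP = EA  -- one atomic-looking step.
static void resize_frame(ToyStack& s, int64_t offset_bytes) {
  uint64_t backlink = *s.sp;                               // ld tmp, 0(SP)
  uint64_t* ea = (uint64_t*)((char*)s.sp + offset_bytes);  // EA = SP + offset
  *ea = backlink;                                          // store back link...
  s.sp = ea;                                               // ...and move SP, together
}

int main() {
  uint64_t mem[16] = {0};
  mem[8] = (uint64_t)&mem[15];     // pretend caller's SP (back link)
  ToyStack s = { &mem[8] };
  resize_frame(s, -4 * 8);         // grow the frame by 32 bytes
  return (s.sp == &mem[4] && *s.sp == (uint64_t)&mem[15]) ? 0 : 1;
}
```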
   1.876 +void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
   1.877 +  // (addr == tmp1) || (addr == tmp2) is allowed here!
   1.878 +  assert(tmp1 != tmp2, "must be distinct");
   1.879 +
   1.880 +  // compute offset w.r.t. current stack pointer
   1.881 +  // tmp1 <- addr - SP (!)
   1.882 +  subf(tmp1, R1_SP, addr);
   1.883 +
   1.884 +  // atomically update SP keeping back link.
   1.885 +  resize_frame(tmp1/* offset */, tmp2/* tmp */);
   1.886 +}
   1.887 +
   1.888 +void MacroAssembler::push_frame(Register bytes, Register tmp) {
   1.889 +#ifdef ASSERT
   1.890 +  assert(bytes != R0, "r0 not allowed here");
   1.891 +  andi_(R0, bytes, frame::alignment_in_bytes-1);
   1.892 +  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
   1.893 +#endif
   1.894 +  neg(tmp, bytes);
   1.895 +  stdux(R1_SP, R1_SP, tmp);
   1.896 +}
   1.897 +
   1.898 +// Push a frame of size `bytes'.
   1.899 +void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
   1.900 +  long offset = align_addr(bytes, frame::alignment_in_bytes);
   1.901 +  if (is_simm(-offset, 16)) {
   1.902 +    stdu(R1_SP, -offset, R1_SP);
   1.903 +  } else {
   1.904 +    load_const(tmp, -offset);
   1.905 +    stdux(R1_SP, R1_SP, tmp);
   1.906 +  }
   1.907 +}
   1.908 +
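push_frame first rounds the requested size up to the stack alignment via align_addr. A plausible definition of that rounding, assuming frame::alignment_in_bytes is a power of two (16 on PPC64):

```cpp
#include <cassert>

// Round x up to the next multiple of a (a must be a power of two).
static long align_up(long x, long a) { return (x + a - 1) & ~(a - 1); }

int main() {
  assert(align_up(0, 16) == 0);
  assert(align_up(1, 16) == 16);
  assert(align_up(112, 16) == 112); // already-aligned sizes are unchanged
  assert(align_up(120, 16) == 128);
  return 0;
}
```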
   1.909 +// Push a frame of size `bytes' plus abi112 on top.
   1.910 +void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) {
   1.911 +  push_frame(bytes + frame::abi_112_size, tmp);
   1.912 +}
   1.913 +
   1.914 +// Set up a new C frame with a spill area for non-volatile GPRs and
   1.915 +// additional space for local variables.
   1.916 +void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes,
   1.917 +                                                    Register tmp) {
   1.918 +  push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp);
   1.919 +}
   1.920 +
   1.921 +// Pop current C frame.
   1.922 +void MacroAssembler::pop_frame() {
   1.923 +  ld(R1_SP, _abi(callers_sp), R1_SP);
   1.924 +}
   1.925 +
   1.926 +// Generic version of a call to a C function via a function descriptor
   1.927 +// with variable support for C calling conventions (TOC, ENV, etc.).
   1.928 +// Updates and returns _last_calls_return_pc.
   1.929 +address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
   1.930 +                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
   1.931 +  // we emit standard ptrgl glue code here
   1.932 +  assert((function_descriptor != R0), "function_descriptor cannot be R0");
   1.933 +
   1.934 +  // retrieve necessary entries from the function descriptor
   1.935 +  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
   1.936 +  mtctr(R0);
   1.937 +
   1.938 +  if (load_toc_of_callee) {
   1.939 +    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
   1.940 +  }
   1.941 +  if (load_env_of_callee) {
   1.942 +    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
   1.943 +  } else if (load_toc_of_callee) {
   1.944 +    li(R11, 0);
   1.945 +  }
   1.946 +
   1.947 +  // do a call or a branch
   1.948 +  if (and_link) {
   1.949 +    bctrl();
   1.950 +  } else {
   1.951 +    bctr();
   1.952 +  }
   1.953 +  _last_calls_return_pc = pc();
   1.954 +
   1.955 +  return _last_calls_return_pc;
   1.956 +}
   1.957 +
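branch_to assumes the ELF/AIX PPC64 function-descriptor layout: three pointer-sized slots for entry point, TOC and environment, so entry_offset/toc_offset/env_offset plausibly resolve to 0, 8 and 16. A sketch of that shape (not the HotSpot FunctionDescriptor class itself):

```cpp
#include <cstdint>

// ELF/AIX PPC64 function descriptor as consumed by branch_to above.
struct ToyFunctionDescriptor {
  uint64_t entry; // code address; branch_to loads this into CTR
  uint64_t toc;   // callee's TOC; loaded into R2_TOC on request
  uint64_t env;   // environment pointer; loaded into R11 on request
};

static_assert(sizeof(ToyFunctionDescriptor) == 24, "three 8-byte slots");

int main() { return 0; }
```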
   1.958 +// Call a C function via a function descriptor and use full C calling
   1.959 +// conventions.
   1.960 +// We don't use the TOC in generated code, so there is no need to save
   1.961 +// and restore its value.
   1.962 +address MacroAssembler::call_c(Register fd) {
   1.963 +  return branch_to(fd, /*and_link=*/true,
   1.964 +                       /*save toc=*/false,
   1.965 +                       /*restore toc=*/false,
   1.966 +                       /*load toc=*/true,
   1.967 +                       /*load env=*/true);
   1.968 +}
   1.969 +
   1.970 +address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
   1.971 +  if (rt != relocInfo::none) {
   1.972 +    // this call needs to be relocatable
   1.973 +    if (!ReoptimizeCallSequences
   1.974 +        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
   1.975 +        || fd == NULL   // support code-size estimation
   1.976 +        || !fd->is_friend_function()
   1.977 +        || fd->entry() == NULL) {
   1.978 +      // It's not a friend function as defined by class FunctionDescriptor,
   1.979 +      // so do a full call-c here.
   1.980 +      load_const(R11, (address)fd, R0);
   1.981 +
   1.982 +      bool has_env = (fd != NULL && fd->env() != NULL);
   1.983 +      return branch_to(R11, /*and_link=*/true,
   1.984 +                                /*save toc=*/false,
   1.985 +                                /*restore toc=*/false,
   1.986 +                                /*load toc=*/true,
   1.987 +                                /*load env=*/has_env);
   1.988 +    } else {
   1.989 +      // It's a friend function. Load the entry point and don't care about
   1.990 +      // toc and env. Use an optimizable call instruction, but ensure the
   1.991 +      // same code-size as in the case of a non-friend function.
   1.992 +      nop();
   1.993 +      nop();
   1.994 +      nop();
   1.995 +      bl64_patchable(fd->entry(), rt);
   1.996 +      _last_calls_return_pc = pc();
   1.997 +      return _last_calls_return_pc;
   1.998 +    }
   1.999 +  } else {
  1.1000 +    // This call does not need to be relocatable, do more aggressive
  1.1001 +    // optimizations.
  1.1002 +    if (!ReoptimizeCallSequences
  1.1003 +      || !fd->is_friend_function()) {
  1.1004 +      // It's not a friend function as defined by class FunctionDescriptor,
  1.1005 +      // so do a full call-c here.
  1.1006 +      load_const(R11, (address)fd, R0);
  1.1007 +      return branch_to(R11, /*and_link=*/true,
  1.1008 +                                /*save toc=*/false,
  1.1009 +                                /*restore toc=*/false,
  1.1010 +                                /*load toc=*/true,
  1.1011 +                                /*load env=*/true);
  1.1012 +    } else {
  1.1013 +      // It's a friend function. Load the entry point and don't care about
  1.1014 +      // toc and env.
  1.1015 +      address dest = fd->entry();
  1.1016 +      if (is_within_range_of_b(dest, pc())) {
  1.1017 +        bl(dest);
  1.1018 +      } else {
  1.1019 +        bl64_patchable(dest, rt);
  1.1020 +      }
  1.1021 +      _last_calls_return_pc = pc();
  1.1022 +      return _last_calls_return_pc;
  1.1023 +    }
  1.1024 +  }
  1.1025 +}
  1.1026 +
  1.1027 +// Call a C function.  All constants needed reside in TOC.
  1.1028 +//
  1.1029 +// Read the address to call from the TOC.
  1.1030 +// Read env from TOC, if fd specifies an env.
  1.1031 +// Read new TOC from TOC.
  1.1032 +address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
  1.1033 +                                         relocInfo::relocType rt, Register toc) {
  1.1034 +  if (!ReoptimizeCallSequences
  1.1035 +    || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
  1.1036 +    || !fd->is_friend_function()) {
  1.1037 +    // It's not a friend function as defined by class FunctionDescriptor,
  1.1038 +    // so do a full call-c here.
  1.1039 +    assert(fd->entry() != NULL, "function must be linked");
  1.1040 +
  1.1041 +    AddressLiteral fd_entry(fd->entry());
  1.1042 +    load_const_from_method_toc(R11, fd_entry, toc);
  1.1043 +    mtctr(R11);
  1.1044 +    if (fd->env() == NULL) {
  1.1045 +      li(R11, 0);
  1.1046 +      nop();
  1.1047 +    } else {
  1.1048 +      AddressLiteral fd_env(fd->env());
  1.1049 +      load_const_from_method_toc(R11, fd_env, toc);
  1.1050 +    }
  1.1051 +    AddressLiteral fd_toc(fd->toc());
  1.1052 +    load_toc_from_toc(R2_TOC, fd_toc, toc);
  1.1053 +    // R2_TOC is killed.
  1.1054 +    bctrl();
  1.1055 +    _last_calls_return_pc = pc();
  1.1056 +  } else {
  1.1057 +    // It's a friend function, load the entry point and don't care about
  1.1058 +    // toc and env. Use an optimizable call instruction, but ensure the
  1.1059 +    // same code-size as in the case of a non-friend function.
  1.1060 +    nop();
  1.1061 +    bl64_patchable(fd->entry(), rt);
  1.1062 +    _last_calls_return_pc = pc();
  1.1063 +  }
  1.1064 +  return _last_calls_return_pc;
  1.1065 +}
  1.1066 +
  1.1067 +void MacroAssembler::call_VM_base(Register oop_result,
  1.1068 +                                  Register last_java_sp,
  1.1069 +                                  address  entry_point,
  1.1070 +                                  bool     check_exceptions) {
  1.1071 +  BLOCK_COMMENT("call_VM {");
  1.1072 +  // Determine last_java_sp register.
  1.1073 +  if (!last_java_sp->is_valid()) {
  1.1074 +    last_java_sp = R1_SP;
  1.1075 +  }
  1.1076 +  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);
  1.1077 +
  1.1078 +  // ARG1 must hold thread address.
  1.1079 +  mr(R3_ARG1, R16_thread);
  1.1080 +
  1.1081 +  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
  1.1082 +
  1.1083 +  reset_last_Java_frame();
  1.1084 +
  1.1085 +  // Check for pending exceptions.
  1.1086 +  if (check_exceptions) {
  1.1087 +    // We don't check for exceptions here.
  1.1088 +    ShouldNotReachHere();
  1.1089 +  }
  1.1090 +
  1.1091 +  // Get oop result if there is one and reset the value in the thread.
  1.1092 +  if (oop_result->is_valid()) {
  1.1093 +    get_vm_result(oop_result);
  1.1094 +  }
  1.1095 +
  1.1096 +  _last_calls_return_pc = return_pc;
  1.1097 +  BLOCK_COMMENT("} call_VM");
  1.1098 +}
  1.1099 +
  1.1100 +void MacroAssembler::call_VM_leaf_base(address entry_point) {
  1.1101 +  BLOCK_COMMENT("call_VM_leaf {");
  1.1102 +  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
  1.1103 +  BLOCK_COMMENT("} call_VM_leaf");
  1.1104 +}
  1.1105 +
  1.1106 +void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  1.1107 +  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
  1.1108 +}
  1.1109 +
  1.1110 +void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
  1.1111 +                             bool check_exceptions) {
  1.1112 +  // R3_ARG1 is reserved for the thread.
  1.1113 +  mr_if_needed(R4_ARG2, arg_1);
  1.1114 +  call_VM(oop_result, entry_point, check_exceptions);
  1.1115 +}
  1.1116 +
  1.1117 +void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
  1.1118 +                             bool check_exceptions) {
  1.1119 +  // R3_ARG1 is reserved for the thread
  1.1120 +  mr_if_needed(R4_ARG2, arg_1);
  1.1121 +  assert(arg_2 != R4_ARG2, "smashed argument");
  1.1122 +  mr_if_needed(R5_ARG3, arg_2);
  1.1123 +  call_VM(oop_result, entry_point, check_exceptions);
  1.1124 +}
  1.1125 +
  1.1126 +void MacroAssembler::call_VM_leaf(address entry_point) {
  1.1127 +  call_VM_leaf_base(entry_point);
  1.1128 +}
  1.1129 +
  1.1130 +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  1.1131 +  mr_if_needed(R3_ARG1, arg_1);
  1.1132 +  call_VM_leaf(entry_point);
  1.1133 +}
  1.1134 +
  1.1135 +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  1.1136 +  mr_if_needed(R3_ARG1, arg_1);
  1.1137 +  assert(arg_2 != R3_ARG1, "smashed argument");
  1.1138 +  mr_if_needed(R4_ARG2, arg_2);
  1.1139 +  call_VM_leaf(entry_point);
  1.1140 +}
  1.1141 +
  1.1142 +void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  1.1143 +  mr_if_needed(R3_ARG1, arg_1);
  1.1144 +  assert(arg_2 != R3_ARG1, "smashed argument");
  1.1145 +  mr_if_needed(R4_ARG2, arg_2);
  1.1146 +  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
  1.1147 +  mr_if_needed(R5_ARG3, arg_3);
  1.1148 +  call_VM_leaf(entry_point);
  1.1149 +}
  1.1150 +
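The "smashed argument" asserts exist because the mr_if_needed moves happen in order: if a later argument already lives in a register an earlier move writes to, its value is gone before it is read. A toy register-file demonstration of the hazard the asserts reject:

```cpp
#include <cassert>

enum { R3_ARG1 = 3, R4_ARG2 = 4 };

int main() {
  long reg[32] = {0};
  reg[R4_ARG2] = 111; // arg_1 happens to live in R4_ARG2
  reg[R3_ARG1] = 222; // arg_2 happens to live in R3_ARG1

  // What call_VM_leaf(entry_point, /*arg_1=*/R4_ARG2, /*arg_2=*/R3_ARG1)
  // would do without the assert:
  reg[R3_ARG1] = reg[R4_ARG2]; // first move clobbers R3_ARG1 ...
  reg[R4_ARG2] = reg[R3_ARG1]; // ... so the second move copies 111, not 222

  assert(reg[R4_ARG2] == 111); // arg_2 was smashed -- hence the assert above
  return 0;
}
```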
  1.1151 +// Check whether instruction is a read access to the polling page
  1.1152 +// which was emitted by load_from_polling_page(..).
  1.1153 +bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
  1.1154 +                                               address* polling_address_ptr) {
  1.1155 +  if (!is_ld(instruction))
  1.1156 +    return false; // It's not a ld. Fail.
  1.1157 +
  1.1158 +  int rt = inv_rt_field(instruction);
  1.1159 +  int ra = inv_ra_field(instruction);
  1.1160 +  int ds = inv_ds_field(instruction);
  1.1161 +  if (!(ds == 0 && ra != 0 && rt == 0)) {
  1.1162 +    return false; // It's not a ld(r0, X, ra). Fail.
  1.1163 +  }
  1.1164 +
  1.1165 +  if (!ucontext) {
  1.1166 +    // Set polling address.
  1.1167 +    if (polling_address_ptr != NULL) {
  1.1168 +      *polling_address_ptr = NULL;
  1.1169 +    }
  1.1170 +    return true; // No ucontext given. Can't check value of ra. Assume true.
  1.1171 +  }
  1.1172 +
  1.1173 +#ifdef LINUX
  1.1174 +  // Ucontext given. Check that register ra contains the address of
  1.1175 +  // the safepoint polling page.
  1.1176 +  ucontext_t* uc = (ucontext_t*) ucontext;
  1.1177 +  // Set polling address.
  1.1178 +  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  1.1179 +  if (polling_address_ptr != NULL) {
  1.1180 +    *polling_address_ptr = addr;
  1.1181 +  }
  1.1182 +  return os::is_poll_address(addr);
  1.1183 +#else
  1.1184 +  // Not on Linux, ucontext must be NULL.
  1.1185 +  ShouldNotReachHere();
  1.1186 +  return false;
  1.1187 +#endif
  1.1188 +}
  1.1189 +
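The inv_*_field accessors used above pull the DS-form fields out of the 32-bit instruction word. A sketch of the same extraction from the ISA layout: primary opcode 58 in the top six bits, then RT and RA as five-bit fields, with the displacement in the low half (its two lowest bits hold the extended opcode, 0 for ld, and the displacement's low two bits are implied zero). These helpers are illustrative, not the HotSpot accessors:

```cpp
#include <cassert>
#include <cstdint>

static bool is_ld_word(uint32_t insn) { return (insn >> 26) == 58 && (insn & 3) == 0; }
static int  rt_field(uint32_t insn)   { return (insn >> 21) & 0x1f; }
static int  ra_field(uint32_t insn)   { return (insn >> 16) & 0x1f; }
// DS is sign-extended, with its low two bits forced to zero.
static int  ds_field(uint32_t insn)   { return (int16_t)(insn & 0xfffc); }

int main() {
  // ld r0, 0(r22) -- the exact shape the polling-page check accepts.
  uint32_t insn = (58u << 26) | (0u << 21) | (22u << 16);
  assert(is_ld_word(insn) && rt_field(insn) == 0 &&
         ra_field(insn) == 22 && ds_field(insn) == 0);
  return 0;
}
```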
  1.1190 +bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
  1.1191 +#ifdef LINUX
  1.1192 +  ucontext_t* uc = (ucontext_t*) ucontext;
  1.1193 +
  1.1194 +  if (is_stwx(instruction) || is_stwux(instruction)) {
  1.1195 +    int ra = inv_ra_field(instruction);
  1.1196 +    int rb = inv_rb_field(instruction);
  1.1197 +
  1.1198 +    // look up content of ra and rb in ucontext
  1.1199 +    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
  1.1200 +    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
  1.1201 +    return os::is_memory_serialize_page(thread, ra_val + rb_val);
  1.1202 +  } else if (is_stw(instruction) || is_stwu(instruction)) {
  1.1203 +    int ra = inv_ra_field(instruction);
  1.1204 +    int d1 = inv_d1_field(instruction);
  1.1205 +
  1.1206 +    // look up content of ra in ucontext
  1.1207 +    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
  1.1208 +    return os::is_memory_serialize_page(thread, ra_val + d1);
  1.1209 +  } else {
  1.1210 +    return false;
  1.1211 +  }
  1.1212 +#else
  1.1213 +  // workaround not needed on !LINUX :-)
  1.1214 +  ShouldNotCallThis();
  1.1215 +  return false;
  1.1216 +#endif
  1.1217 +}
  1.1218 +
  1.1219 +void MacroAssembler::bang_stack_with_offset(int offset) {
  1.1220 +  // When increasing the stack, the old stack pointer will be written
  1.1221 +  // to the new top of stack according to the PPC64 ABI.
  1.1222 +  // Therefore, stack banging is not necessary when increasing
  1.1223 +  // the stack by <= os::vm_page_size() bytes.
  1.1224 +  // When increasing the stack by a larger amount, this method is
  1.1225 +  // called repeatedly to bang the intermediate pages.
  1.1226 +
  1.1227 +  // Stack grows down, caller passes positive offset.
  1.1228 +  assert(offset > 0, "must bang with positive offset");
  1.1229 +
  1.1230 +  long stdoffset = -offset;
  1.1231 +
  1.1232 +  if (is_simm(stdoffset, 16)) {
  1.1233 +    // Signed 16 bit offset, a simple std is ok.
  1.1234 +    if (UseLoadInstructionsForStackBangingPPC64) {
  1.1235 +      ld(R0, (int)(signed short)stdoffset, R1_SP);
  1.1236 +    } else {
  1.1237 +      std(R0, (int)(signed short)stdoffset, R1_SP);
  1.1238 +    }
  1.1239 +  } else if (is_simm(stdoffset, 31)) {
  1.1240 +    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
  1.1241 +    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
  1.1242 +
  1.1243 +    Register tmp = R11;
  1.1244 +    addis(tmp, R1_SP, hi);
  1.1245 +    if (UseLoadInstructionsForStackBangingPPC64) {
  1.1246 +      ld(R0,  lo, tmp);
  1.1247 +    } else {
  1.1248 +      std(R0, lo, tmp);
  1.1249 +    }
  1.1250 +  } else {
  1.1251 +    ShouldNotReachHere();
  1.1252 +  }
  1.1253 +}
  1.1254 +
  1.1255 +// If instruction is a stack bang of the form
  1.1256 +//    std    R0,    x(Ry),       (see bang_stack_with_offset())
  1.1257 +//    stdu   R1_SP, x(R1_SP),    (see push_frame(), resize_frame())
  1.1258 +// or stdux  R1_SP, Rx, R1_SP    (see push_frame(), resize_frame())
  1.1259 +// return the banged address. Otherwise, return 0.
  1.1260 +address MacroAssembler::get_stack_bang_address(int instruction, void* ucontext) {
  1.1261 +#ifdef LINUX
  1.1262 +  ucontext_t* uc = (ucontext_t*) ucontext;
  1.1263 +  int rs = inv_rs_field(instruction);
  1.1264 +  int ra = inv_ra_field(instruction);
  1.1265 +  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
  1.1266 +      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
  1.1267 +      || (is_stdu(instruction) && rs == 1)) {
  1.1268 +    int ds = inv_ds_field(instruction);
  1.1269 +    // return banged address
  1.1270 +    return ds+(address)uc->uc_mcontext.regs->gpr[ra];
  1.1271 +  } else if (is_stdux(instruction) && rs == 1) {
  1.1272 +    int rb = inv_rb_field(instruction);
  1.1273 +    address sp = (address)uc->uc_mcontext.regs->gpr[1];
  1.1274 +    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
  1.1275 +    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
  1.1276 +                                  : sp + rb_val; // banged address
  1.1277 +  }
  1.1278 +  return NULL; // not a stack bang
  1.1279 +#else
  1.1280 +  // The stack-bang decoding is only needed (and implemented) on Linux.
  1.1281 +  ShouldNotCallThis();
  1.1282 +  return NULL;
  1.1283 +#endif
  1.1284 +}
  1.1285 +
  1.1286 +// CmpxchgX sets condition register to cmpX(current, compare).
  1.1287 +void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
  1.1288 +                              Register compare_value, Register exchange_value,
  1.1289 +                              Register addr_base, int semantics, bool cmpxchgx_hint,
  1.1290 +                              Register int_flag_success, bool contention_hint) {
  1.1291 +  Label retry;
  1.1292 +  Label failed;
  1.1293 +  Label done;
  1.1294 +
  1.1295 +  // Save one branch if result is returned via register and
  1.1296 +  // result register is different from the other ones.
  1.1297 +  bool use_result_reg    = (int_flag_success != noreg);
  1.1298 +  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
  1.1299 +                            int_flag_success != exchange_value && int_flag_success != addr_base);
  1.1300 +
  1.1301 +  // release/fence semantics
  1.1302 +  if (semantics & MemBarRel) {
  1.1303 +    release();
  1.1304 +  }
  1.1305 +
  1.1306 +  if (use_result_reg && preset_result_reg) {
  1.1307 +    li(int_flag_success, 0); // preset (assume cas failed)
  1.1308 +  }
  1.1309 +
  1.1310 +  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  1.1311 +  if (contention_hint) { // Don't try to reserve if cmp fails.
  1.1312 +    lwz(dest_current_value, 0, addr_base);
  1.1313 +    cmpw(flag, dest_current_value, compare_value);
  1.1314 +    bne(flag, failed);
  1.1315 +  }
  1.1316 +
  1.1317 +  // atomic emulation loop
  1.1318 +  bind(retry);
  1.1319 +
  1.1320 +  lwarx(dest_current_value, addr_base, cmpxchgx_hint);
  1.1321 +  cmpw(flag, dest_current_value, compare_value);
  1.1322 +  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1.1323 +    bne_predict_not_taken(flag, failed);
  1.1324 +  } else {
  1.1325 +    bne(                  flag, failed);
  1.1326 +  }
  1.1327 +  // branch to done  => (flag == ne), (dest_current_value != compare_value)
  1.1328 +  // fall through    => (flag == eq), (dest_current_value == compare_value)
  1.1329 +
  1.1330 +  stwcx_(exchange_value, addr_base);
  1.1331 +  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1.1332 +    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  1.1333 +  } else {
  1.1334 +    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  1.1335 +  }
  1.1336 +  // fall through    => (flag == eq), (dest_current_value == compare_value), (swapped)
  1.1337 +
  1.1338 +  // Result in register (must do this at the end because int_flag_success can be the
  1.1339 +  // same register as one above).
  1.1340 +  if (use_result_reg) {
  1.1341 +    li(int_flag_success, 1);
  1.1342 +  }
  1.1343 +
  1.1344 +  if (semantics & MemBarFenceAfter) {
  1.1345 +    fence();
  1.1346 +  } else if (semantics & MemBarAcq) {
  1.1347 +    isync();
  1.1348 +  }
  1.1349 +
  1.1350 +  if (use_result_reg && !preset_result_reg) {
  1.1351 +    b(done);
  1.1352 +  }
  1.1353 +
  1.1354 +  bind(failed);
  1.1355 +  if (use_result_reg && !preset_result_reg) {
  1.1356 +    li(int_flag_success, 0);
  1.1357 +  }
  1.1358 +
  1.1359 +  bind(done);
  1.1360 +  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  1.1361 +  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  1.1362 +}
  1.1363 +
  1.1364 +// Performs an atomic compare-exchange:
  1.1365 +//   if (compare_value == *addr_base) {
  1.1366 +//     *addr_base = exchange_value; int_flag_success = 1;
  1.1367 +//   } else {
  1.1368 +//     int_flag_success = 0;
  1.1369 +//   }
  1.1370 +//
  1.1371 +// ConditionRegister flag       = cmp(compare_value, *addr_base)
  1.1372 +// Register dest_current_value  = *addr_base
  1.1373 +// Register compare_value       Used to compare with value in memory
  1.1374 +// Register exchange_value      Written to memory if compare_value == *addr_base
  1.1375 +// Register addr_base           The memory location to compareXChange
  1.1376 +// Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
  1.1377 +//
  1.1378 +// To avoid a costly compare-exchange, the value can be tested beforehand (see contention_hint).
  1.1379 +// Several special cases exist to avoid emitting unnecessary code.
  1.1380 +//
  1.1381 +void MacroAssembler::cmpxchgd(ConditionRegister flag,
  1.1382 +                              Register dest_current_value, Register compare_value, Register exchange_value,
  1.1383 +                              Register addr_base, int semantics, bool cmpxchgx_hint,
  1.1384 +                              Register int_flag_success, Label* failed_ext, bool contention_hint) {
  1.1385 +  Label retry;
  1.1386 +  Label failed_int;
  1.1387 +  Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
  1.1388 +  Label done;
  1.1389 +
  1.1390 +  // Save one branch if result is returned via register and result register is different from the other ones.
  1.1391 +  bool use_result_reg    = (int_flag_success != noreg);
  1.1392 +  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
  1.1393 +                            int_flag_success != exchange_value && int_flag_success != addr_base);
  1.1394 +  assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
  1.1395 +
  1.1396 +  // release/fence semantics
  1.1397 +  if (semantics & MemBarRel) {
  1.1398 +    release();
  1.1399 +  }
  1.1400 +
  1.1401 +  if (use_result_reg && preset_result_reg) {
  1.1402 +    li(int_flag_success, 0); // preset (assume cas failed)
  1.1403 +  }
  1.1404 +
  1.1405 +  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  1.1406 +  if (contention_hint) { // Don't try to reserve if cmp fails.
  1.1407 +    ld(dest_current_value, 0, addr_base);
  1.1408 +    cmpd(flag, dest_current_value, compare_value);
  1.1409 +    bne(flag, failed);
  1.1410 +  }
  1.1411 +
  1.1412 +  // atomic emulation loop
  1.1413 +  bind(retry);
  1.1414 +
  1.1415 +  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  1.1416 +  cmpd(flag, dest_current_value, compare_value);
  1.1417 +  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1.1418 +    bne_predict_not_taken(flag, failed);
  1.1419 +  } else {
  1.1420 +    bne(                  flag, failed);
  1.1421 +  }
  1.1422 +
  1.1423 +  stdcx_(exchange_value, addr_base);
  1.1424 +  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1.1425 +    bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
  1.1426 +  } else {
  1.1427 +    bne(                  CCR0, retry); // stXcx_ sets CCR0
  1.1428 +  }
  1.1429 +
  1.1430 +  // result in register (must do this at the end because int_flag_success can be the same register as one above)
  1.1431 +  if (use_result_reg) {
  1.1432 +    li(int_flag_success, 1);
  1.1433 +  }
  1.1434 +
  1.1435 +  // POWER6 doesn't need an isync in CAS, but we always emit one
  1.1436 +  // to be on the safe side.
  1.1437 +  if (semantics & MemBarFenceAfter) {
  1.1438 +    fence();
  1.1439 +  } else if (semantics & MemBarAcq) {
  1.1440 +    isync();
  1.1441 +  }
  1.1442 +
  1.1443 +  if (use_result_reg && !preset_result_reg) {
  1.1444 +    b(done);
  1.1445 +  }
  1.1446 +
  1.1447 +  bind(failed_int);
  1.1448 +  if (use_result_reg && !preset_result_reg) {
  1.1449 +    li(int_flag_success, 0);
  1.1450 +  }
  1.1451 +
  1.1452 +  bind(done);
  1.1453 +  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  1.1454 +  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  1.1455 +}
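// Ignoring the PPC-specific hint and barrier parameters, the ldarx/stdcx_ loop
// above computes the following (hypothetical C++ helper, for illustration only):
//
//   #include <atomic>
//   bool cmpxchgd_semantics(std::atomic<long>* addr_base, long* dest_current_value,
//                           long compare_value, long exchange_value) {
//     long expected = compare_value;
//     bool swapped = addr_base->compare_exchange_strong(expected, exchange_value);
//     *dest_current_value = expected; // on failure: the value found in memory
//     return swapped;                 // mirrors int_flag_success
//   }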
  1.1456 +
  1.1457 +// Look up the method for a megamorphic invokeinterface call.
  1.1458 +// The target method is determined by <intf_klass, itable_index>.
  1.1459 +// The receiver klass is in recv_klass.
  1.1460 +// On success, the result will be in method_result, and execution falls through.
  1.1461 +// On failure, execution transfers to the given label.
  1.1462 +void MacroAssembler::lookup_interface_method(Register recv_klass,
  1.1463 +                                             Register intf_klass,
  1.1464 +                                             RegisterOrConstant itable_index,
  1.1465 +                                             Register method_result,
  1.1466 +                                             Register scan_temp,
  1.1467 +                                             Register sethi_temp,
  1.1468 +                                             Label& L_no_such_interface) {
  1.1469 +  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  1.1470 +  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
  1.1471 +         "caller must use same register for non-constant itable index as for method");
  1.1472 +
  1.1473 +  // Compute start of first itableOffsetEntry (which is at the end of the vtable).
  1.1474 +  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  1.1475 +  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  1.1476 +  int logMEsize   = exact_log2(itableMethodEntry::size() * wordSize);
  1.1477 +  int scan_step   = itableOffsetEntry::size() * wordSize;
  1.1478 +  int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
  1.1479 +
  1.1480 +  lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
  1.1481 +  // %%% We should store the aligned, prescaled offset in the klassoop.
  1.1482 +  // Then the next several instructions would fold away.
  1.1483 +
  1.1484 +  sldi(scan_temp, scan_temp, log_vte_size);
  1.1485 +  addi(scan_temp, scan_temp, vtable_base);
  1.1486 +  add(scan_temp, recv_klass, scan_temp);
  1.1487 +
  1.1488 +  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  1.1489 +  if (itable_index.is_register()) {
  1.1490 +    Register itable_offset = itable_index.as_register();
  1.1491 +    sldi(itable_offset, itable_offset, logMEsize);
  1.1492 +    if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
  1.1493 +    add(recv_klass, itable_offset, recv_klass);
  1.1494 +  } else {
  1.1495 +    long itable_offset = (long)itable_index.as_constant();
  1.1496 +    load_const_optimized(sethi_temp, (itable_offset << logMEsize) + itentry_off); // static address, no relocation
  1.1497 +    add(recv_klass, sethi_temp, recv_klass);
  1.1498 +  }
  1.1499 +
  1.1500 +  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  1.1501 +  //   if (scan->interface() == intf) {
  1.1502 +  //     result = (klass + scan->offset() + itable_index);
  1.1503 +  //   }
  1.1504 +  // }
  1.1505 +  Label search, found_method;
  1.1506 +
  1.1507 +  for (int peel = 1; peel >= 0; peel--) {
  1.1508 +    // %%%% Could load both offset and interface in one ldx, if they were
  1.1509 +    // in the opposite order. This would save a load.
  1.1510 +    ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
  1.1511 +
  1.1512 +    // Check that this entry is non-null. A null entry means that
  1.1513 +    // the receiver class doesn't implement the interface, and wasn't the
  1.1514 +    // same as when the caller was compiled.
  1.1515 +    cmpd(CCR0, method_result, intf_klass);
  1.1516 +
  1.1517 +    if (peel) {
  1.1518 +      beq(CCR0, found_method);
  1.1519 +    } else {
  1.1520 +      bne(CCR0, search);
  1.1521 +      // (invert the test to fall through to found_method...)
  1.1522 +    }
  1.1523 +
  1.1524 +    if (!peel) break;
  1.1525 +
  1.1526 +    bind(search);
  1.1527 +
  1.1528 +    cmpdi(CCR0, method_result, 0);
  1.1529 +    beq(CCR0, L_no_such_interface);
  1.1530 +    addi(scan_temp, scan_temp, scan_step);
  1.1531 +  }
  1.1532 +
  1.1533 +  bind(found_method);
  1.1534 +
  1.1535 +  // Got a hit.
  1.1536 +  int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
  1.1537 +  lwz(scan_temp, ito_offset, scan_temp);
  1.1538 +  ldx(method_result, scan_temp, recv_klass);
  1.1539 +}
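// Control flow emitted by the peeled loop above (sketch). Peeling the first
// probe lets the common case -- a hit in the first itable entry -- fall
// through without taking a backward branch:
//
//           t = scan->interface();
//           if (t == intf_klass) goto found_method;   // peel == 1
//   search: if (t == NULL) goto L_no_such_interface;
//           scan += scan_step;
//           t = scan->interface();
//           if (t != intf_klass) goto search;         // peel == 0, test inverted
//   found_method: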
  1.1540 +
  1.1541 +// virtual method calling
  1.1542 +void MacroAssembler::lookup_virtual_method(Register recv_klass,
  1.1543 +                                           RegisterOrConstant vtable_index,
  1.1544 +                                           Register method_result) {
  1.1545 +
  1.1546 +  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
  1.1547 +
  1.1548 +  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  1.1549 +  assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  1.1550 +
  1.1551 +  if (vtable_index.is_register()) {
  1.1552 +    sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
  1.1553 +    add(recv_klass, vtable_index.as_register(), recv_klass);
  1.1554 +  } else {
  1.1555 +    addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
  1.1556 +  }
  1.1557 +  ld(method_result, base + vtableEntry::method_offset_in_bytes(), recv_klass);
  1.1558 +}
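// Semantically the lookup computes (HotSpot vtable layout as assumed above):
//
//   method_result = *(Method**)((address)recv_klass
//                               + InstanceKlass::vtable_start_offset() * wordSize
//                               + vtable_index * wordSize
//                               + vtableEntry::method_offset_in_bytes());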
  1.1559 +
  1.1560 +/////////////////////////////////////////// subtype checking ////////////////////////////////////////////
  1.1561 +
  1.1562 +void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
  1.1563 +                                                   Register super_klass,
  1.1564 +                                                   Register temp1_reg,
  1.1565 +                                                   Register temp2_reg,
  1.1566 +                                                   Label& L_success,
  1.1567 +                                                   Label& L_failure) {
  1.1568 +
  1.1569 +  const Register check_cache_offset = temp1_reg;
  1.1570 +  const Register cached_super       = temp2_reg;
  1.1571 +
  1.1572 +  assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
  1.1573 +
  1.1574 +  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  1.1575 +  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  1.1576 +
  1.1577 +  // If the pointers are equal, we are done (e.g., String[] elements).
  1.1578 +  // This self-check enables sharing of secondary supertype arrays among
  1.1579 +  // non-primary types such as array-of-interface. Otherwise, each such
  1.1580 +  // type would need its own customized SSA.
  1.1581 +  // We move this check to the front of the fast path because many
  1.1582 +  // type checks are in fact trivially successful in this manner,
  1.1583 +  // so we get a nicely predicted branch right at the start of the check.
  1.1584 +  cmpd(CCR0, sub_klass, super_klass);
  1.1585 +  beq(CCR0, L_success);
  1.1586 +
  1.1587 +  // Check the supertype display:
  1.1588 +  lwz(check_cache_offset, sco_offset, super_klass);
  1.1589 +  // The loaded value is the offset from KlassOopDesc.
  1.1590 +
  1.1591 +  ldx(cached_super, check_cache_offset, sub_klass);
  1.1592 +  cmpd(CCR0, cached_super, super_klass);
  1.1593 +  beq(CCR0, L_success);
  1.1594 +
  1.1595 +  // This check has worked decisively for primary supers.
  1.1596 +  // Secondary supers are sought in the super_cache ('super_cache_addr').
  1.1597 +  // (Secondary supers are interfaces and very deeply nested subtypes.)
  1.1598 +  // This works in the same check above because of a tricky aliasing
  1.1599 +  // between the super_cache and the primary super display elements.
  1.1600 +  // (The 'super_check_addr' can address either, as the case requires.)
  1.1601 +  // Note that the cache is updated below if it does not help us find
  1.1602 +  // what we need immediately.
  1.1603 +  // So if it was a primary super, we can just fail immediately.
  1.1604 +  // Otherwise, it's the slow path for us (no success at this point).
  1.1605 +
  1.1606 +  cmpwi(CCR0, check_cache_offset, sc_offset);
  1.1607 +  bne(CCR0, L_failure);
  1.1608 +  // bind(slow_path); // fallthru
  1.1609 +}
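// The fast path mirrors this C++ logic (sketch; sco_offset/sc_offset as above):
//
//   if (sub_klass == super_klass) goto L_success;             // trivial self-check
//   juint off = super_klass->super_check_offset();
//   if (*(Klass**)((address)sub_klass + off) == super_klass) goto L_success;
//   if (off != sc_offset) goto L_failure;                     // primary-super miss is final
//   /* off == sc_offset: secondary supers, fall through to the slow path */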
  1.1610 +
  1.1611 +void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
  1.1612 +                                                   Register super_klass,
  1.1613 +                                                   Register temp1_reg,
  1.1614 +                                                   Register temp2_reg,
  1.1615 +                                                   Label* L_success,
  1.1616 +                                                   Register result_reg) {
  1.1617 +  const Register array_ptr = temp1_reg; // current value from cache array
  1.1618 +  const Register temp      = temp2_reg;
  1.1619 +
  1.1620 +  assert_different_registers(sub_klass, super_klass, array_ptr, temp);
  1.1621 +
  1.1622 +  int source_offset = in_bytes(Klass::secondary_supers_offset());
  1.1623 +  int target_offset = in_bytes(Klass::secondary_super_cache_offset());
  1.1624 +
  1.1625 +  int length_offset = Array<Klass*>::length_offset_in_bytes();
  1.1626 +  int base_offset   = Array<Klass*>::base_offset_in_bytes();
  1.1627 +
  1.1628 +  Label hit, loop, failure, fallthru;
  1.1629 +
  1.1630 +  ld(array_ptr, source_offset, sub_klass);
  1.1631 +
  1.1632 +  //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
  1.1633 +  lwz(temp, length_offset, array_ptr);
  1.1634 +  cmpwi(CCR0, temp, 0);
  1.1635 +  beq(CCR0, result_reg != noreg ? failure : fallthru); // length 0
  1.1636 +
  1.1637 +  mtctr(temp); // load ctr
  1.1638 +
  1.1639 +  bind(loop);
  1.1640 +  // Oops in the table are no longer compressed.
  1.1641 +  ld(temp, base_offset, array_ptr);
  1.1642 +  cmpd(CCR0, temp, super_klass);
  1.1643 +  beq(CCR0, hit);
  1.1644 +  addi(array_ptr, array_ptr, BytesPerWord);
  1.1645 +  bdnz(loop);
  1.1646 +
  1.1647 +  bind(failure);
  1.1648 +  if (result_reg != noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
  1.1649 +  b(fallthru);
  1.1650 +
  1.1651 +  bind(hit);
  1.1652 +  std(super_klass, target_offset, sub_klass); // save result to cache
  1.1653 +  if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
  1.1654 +  if (L_success != NULL) b(*L_success);
  1.1655 +
  1.1656 +  bind(fallthru);
  1.1657 +}
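// The slow path above performs this scan, in C++ terms (sketch):
//
//   Array<Klass*>* ss = sub_klass->secondary_supers();
//   for (int i = 0; i < ss->length(); i++) {
//     if (ss->at(i) == super_klass) {
//       sub_klass->set_secondary_super_cache(super_klass);  // remember the hit
//       /* result_reg = 0; */ break;                        // hit
//     }
//   }
//   /* result_reg = 1 if the loop ran off the end */        // miss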
  1.1658 +
  1.1659 +// Try fast path, then go to slow one if not successful
  1.1660 +void MacroAssembler::check_klass_subtype(Register sub_klass,
  1.1661 +                         Register super_klass,
  1.1662 +                         Register temp1_reg,
  1.1663 +                         Register temp2_reg,
  1.1664 +                         Label& L_success) {
  1.1665 +  Label L_failure;
  1.1666 +  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
  1.1667 +  check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
  1.1668 +  bind(L_failure); // Fallthru if not successful.
  1.1669 +}
  1.1670 +
  1.1671 +void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
  1.1672 +                                              Register temp_reg,
  1.1673 +                                              Label& wrong_method_type) {
  1.1674 +  assert_different_registers(mtype_reg, mh_reg, temp_reg);
  1.1675 +  // Compare method type against that of the receiver.
  1.1676 +  load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
  1.1677 +  cmpd(CCR0, temp_reg, mtype_reg);
  1.1678 +  bne(CCR0, wrong_method_type);
  1.1679 +}
  1.1680 +
  1.1681 +RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
  1.1682 +                                                   Register temp_reg,
  1.1683 +                                                   int extra_slot_offset) {
  1.1684 +  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  1.1685 +  int stackElementSize = Interpreter::stackElementSize;
  1.1686 +  int offset = extra_slot_offset * stackElementSize;
  1.1687 +  if (arg_slot.is_constant()) {
  1.1688 +    offset += arg_slot.as_constant() * stackElementSize;
  1.1689 +    return offset;
  1.1690 +  } else {
  1.1691 +    assert(temp_reg != noreg, "must specify");
  1.1692 +    sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
  1.1693 +    if (offset != 0)
  1.1694 +      addi(temp_reg, temp_reg, offset);
  1.1695 +    return temp_reg;
  1.1696 +  }
  1.1697 +}
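// Worked example (sketch): with Interpreter::stackElementSize == 8, a constant
// arg_slot of 2 and extra_slot_offset of 1 yield the constant offset
// (1 + 2) * 8 == 24; with a register arg_slot the same value is computed as
// temp_reg = (arg_slot << 3) + 8.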
  1.1698 +
  1.1699 +void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
  1.1700 +                                          Register mark_reg, Register temp_reg,
  1.1701 +                                          Register temp2_reg, Label& done, Label* slow_case) {
  1.1702 +  assert(UseBiasedLocking, "why call this otherwise?");
  1.1703 +
  1.1704 +#ifdef ASSERT
  1.1705 +  assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
  1.1706 +#endif
  1.1707 +
  1.1708 +  Label cas_label;
  1.1709 +
  1.1710 +  // Branch to done if fast path fails and no slow_case provided.
  1.1711 +  Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
  1.1712 +
  1.1713 +  // Biased locking
  1.1714 +  // See whether the lock is currently biased toward our thread and
  1.1715 +  // whether the epoch is still valid
  1.1716 +  // Note that the runtime guarantees sufficient alignment of JavaThread
  1.1717 +  // pointers to allow age to be placed into low bits
  1.1718 +  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
  1.1719 +         "biased locking makes assumptions about bit layout");
  1.1720 +
  1.1721 +  if (PrintBiasedLockingStatistics) {
  1.1722 +    load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
  1.1723 +    lwz(temp2_reg, 0, temp_reg);
  1.1724 +    addi(temp2_reg, temp2_reg, 1);
  1.1725 +    stw(temp2_reg, 0, temp_reg);
  1.1726 +  }
  1.1727 +
  1.1728 +  andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
  1.1729 +  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  1.1730 +  bne(cr_reg, cas_label);
  1.1731 +
  1.1732 +  load_klass_with_trap_null_check(temp_reg, obj_reg);
  1.1733 +
  1.1734 +  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
  1.1735 +  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  1.1736 +  orr(temp_reg, R16_thread, temp_reg);
  1.1737 +  xorr(temp_reg, mark_reg, temp_reg);
  1.1738 +  andr(temp_reg, temp_reg, temp2_reg);
  1.1739 +  cmpdi(cr_reg, temp_reg, 0);
  1.1740 +  if (PrintBiasedLockingStatistics) {
  1.1741 +    Label l;
  1.1742 +    bne(cr_reg, l);
  1.1743 +    load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
  1.1744 +    lwz(temp2_reg, 0, mark_reg);
  1.1745 +    addi(temp2_reg, temp2_reg, 1);
  1.1746 +    stw(temp2_reg, 0, mark_reg);
  1.1747 +    // restore mark_reg
  1.1748 +    ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
  1.1749 +    bind(l);
  1.1750 +  }
  1.1751 +  beq(cr_reg, done);
  1.1752 +
  1.1753 +  Label try_revoke_bias;
  1.1754 +  Label try_rebias;
  1.1755 +
  1.1756 +  // At this point we know that the header has the bias pattern and
  1.1757 +  // that we are not the bias owner in the current epoch. We need to
  1.1758 +  // figure out more details about the state of the header in order to
  1.1759 +  // know what operations can be legally performed on the object's
  1.1760 +  // header.
  1.1761 +
  1.1762 +  // If the low three bits in the xor result aren't clear, that means
  1.1763 +  // the prototype header is no longer biased and we have to revoke
  1.1764 +  // the bias on this object.
  1.1765 +  andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  1.1766 +  cmpwi(cr_reg, temp2_reg, 0);
  1.1767 +  bne(cr_reg, try_revoke_bias);
  1.1768 +
  1.1769 +  // Biasing is still enabled for this data type. See whether the
  1.1770 +  // epoch of the current bias is still valid, meaning that the epoch
  1.1771 +  // bits of the mark word are equal to the epoch bits of the
  1.1772 +  // prototype header. (Note that the prototype header's epoch bits
  1.1773 +  // only change at a safepoint.) If not, attempt to rebias the object
  1.1774 +  // toward the current thread. Note that we must be absolutely sure
  1.1775 +  // that the current epoch is invalid in order to do this because
  1.1776 +  // otherwise the manipulations it performs on the mark word are
  1.1777 +  // illegal.
  1.1778 +
  1.1779 +  int shift_amount = 64 - markOopDesc::epoch_shift;
  1.1780 +  // rotate epoch bits to right (little) end and set other bits to 0
  1.1781 +  // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
  1.1782 +  rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
  1.1783 +  // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
  1.1784 +  bne(CCR0, try_rebias);
  1.1785 +
  1.1786 +  // The epoch of the current bias is still valid but we know nothing
  1.1787 +  // about the owner; it might be set or it might be clear. Try to
  1.1788 +  // acquire the bias of the object using an atomic operation. If this
  1.1789 +  // fails we will go in to the runtime to revoke the object's bias.
  1.1790 +  // Note that we first construct the presumed unbiased header so we
  1.1791 +  // don't accidentally blow away another thread's valid bias.
  1.1792 +  andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
  1.1793 +                                markOopDesc::age_mask_in_place |
  1.1794 +                                markOopDesc::epoch_mask_in_place));
  1.1795 +  orr(temp_reg, R16_thread, mark_reg);
  1.1796 +
  1.1797 +  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1.1798 +
  1.1799 +  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1.1800 +  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1.1801 +  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1.1802 +           /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1.1803 +           /*where=*/obj_reg,
  1.1804 +           MacroAssembler::MemBarAcq,
  1.1805 +           MacroAssembler::cmpxchgx_hint_acquire_lock(),
  1.1806 +           noreg, slow_case_int); // bail out if failed
  1.1807 +
  1.1808 +  // If the biasing toward our thread failed, this means that
  1.1809 +  // another thread succeeded in biasing it toward itself and we
  1.1810 +  // need to revoke that bias. The revocation will occur in the
  1.1811 +  // interpreter runtime in the slow case.
  1.1812 +  if (PrintBiasedLockingStatistics) {
  1.1813 +    load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
  1.1814 +    lwz(temp2_reg, 0, temp_reg);
  1.1815 +    addi(temp2_reg, temp2_reg, 1);
  1.1816 +    stw(temp2_reg, 0, temp_reg);
  1.1817 +  }
  1.1818 +  b(done);
  1.1819 +
  1.1820 +  bind(try_rebias);
  1.1821 +  // At this point we know the epoch has expired, meaning that the
  1.1822 +  // current "bias owner", if any, is actually invalid. Under these
  1.1823 +  // circumstances _only_, we are allowed to use the current header's
  1.1824 +  // value as the comparison value when doing the cas to acquire the
  1.1825 +  // bias in the current epoch. In other words, we allow transfer of
  1.1826 +  // the bias from one thread to another directly in this situation.
  1.1827 +  andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
  1.1828 +  orr(temp_reg, R16_thread, temp_reg);
  1.1829 +  load_klass_with_trap_null_check(temp2_reg, obj_reg);
  1.1830 +  ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
  1.1831 +  orr(temp_reg, temp_reg, temp2_reg);
  1.1832 +
  1.1833 +  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1.1834 +
  1.1835 +  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1.1836 +  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1.1837 +  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1.1838 +                 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1.1839 +                 /*where=*/obj_reg,
  1.1840 +                 MacroAssembler::MemBarAcq,
  1.1841 +                 MacroAssembler::cmpxchgx_hint_acquire_lock(),
  1.1842 +                 noreg, slow_case_int); // bail out if failed
  1.1843 +
  1.1844 +  // If the biasing toward our thread failed, this means that
  1.1845 +  // another thread succeeded in biasing it toward itself and we
  1.1846 +  // need to revoke that bias. The revocation will occur in the
  1.1847 +  // interpreter runtime in the slow case.
  1.1848 +  if (PrintBiasedLockingStatistics) {
  1.1849 +    load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
  1.1850 +    lwz(temp2_reg, 0, temp_reg);
  1.1851 +    addi(temp2_reg, temp2_reg, 1);
  1.1852 +    stw(temp2_reg, 0, temp_reg);
  1.1853 +  }
  1.1854 +  b(done);
  1.1855 +
  1.1856 +  bind(try_revoke_bias);
  1.1857 +  // The prototype mark in the klass doesn't have the bias bit set any
  1.1858 +  // more, indicating that objects of this data type are not supposed
  1.1859 +  // to be biased any more. We are going to try to reset the mark of
  1.1860 +  // this object to the prototype value and fall through to the
  1.1861 +  // CAS-based locking scheme. Note that if our CAS fails, it means
  1.1862 +  // that another thread raced us for the privilege of revoking the
  1.1863 +  // bias of this particular object, so it's okay to continue in the
  1.1864 +  // normal locking code.
  1.1865 +  load_klass_with_trap_null_check(temp_reg, obj_reg);
  1.1866 +  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  1.1867 +  andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
  1.1868 +  orr(temp_reg, temp_reg, temp2_reg);
  1.1869 +
  1.1870 +  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1.1871 +
  1.1872 +  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1.1873 +  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1.1874 +  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1.1875 +                 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1.1876 +                 /*where=*/obj_reg,
  1.1877 +                 MacroAssembler::MemBarAcq,
  1.1878 +                 MacroAssembler::cmpxchgx_hint_acquire_lock());
  1.1879 +
  1.1880 +  // reload markOop in mark_reg before continuing with lightweight locking
  1.1881 +  ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
  1.1882 +
  1.1883 +  // Fall through to the normal CAS-based lock, because no matter what
  1.1884 +  // the result of the above CAS, some thread must have succeeded in
  1.1885 +  // removing the bias bit from the object's header.
  1.1886 +  if (PrintBiasedLockingStatistics) {
  1.1887 +    Label l;
  1.1888 +    bne(cr_reg, l);
  1.1889 +    load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
  1.1890 +    lwz(temp2_reg, 0, temp_reg);
  1.1891 +    addi(temp2_reg, temp2_reg, 1);
  1.1892 +    stw(temp2_reg, 0, temp_reg);
  1.1893 +    bind(l);
  1.1894 +  }
  1.1895 +
  1.1896 +  bind(cas_label);
  1.1897 +}
  1.1898 +
  1.1899 +void MacroAssembler::biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
  1.1900 +  // Check for biased locking unlock case, which is a no-op
  1.1901 +  // Note: we do not have to check the thread ID for two reasons.
  1.1902 +  // First, the interpreter checks for IllegalMonitorStateException at
  1.1903 +  // a higher level. Second, if the bias was revoked while we held the
  1.1904 +  // lock, the object could not be rebiased toward another thread, so
  1.1905 +  // the bias bit would be clear.
  1.1906 +
  1.1907 +  ld(temp_reg, 0, mark_addr);
  1.1908 +  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  1.1909 +
  1.1910 +  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  1.1911 +  beq(cr_reg, done);
  1.1912 +}
  1.1913 +
  1.1914 +// "The box" is the space on the stack where we copy the object mark.
  1.1915 +void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
  1.1916 +                                               Register temp, Register displaced_header, Register current_header) {
  1.1917 +  assert_different_registers(oop, box, temp, displaced_header, current_header);
  1.1918 +  assert(flag != CCR0, "bad condition register");
  1.1919 +  Label cont;
  1.1920 +  Label object_has_monitor;
  1.1921 +  Label cas_failed;
  1.1922 +
  1.1923 +  // Load markOop from object into displaced_header.
  1.1924 +  ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
  1.1925 +
  1.1927 +  // Always do locking in runtime.
  1.1928 +  if (EmitSync & 0x01) {
  1.1929 +    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
  1.1930 +    return;
  1.1931 +  }
  1.1932 +
  1.1933 +  if (UseBiasedLocking) {
  1.1934 +    biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
  1.1935 +  }
  1.1936 +
  1.1937 +  // Handle existing monitor.
  1.1938 +  if ((EmitSync & 0x02) == 0) {
  1.1939 +    // The object has an existing monitor iff (mark & monitor_value) != 0.
  1.1940 +    andi_(temp, displaced_header, markOopDesc::monitor_value);
  1.1941 +    bne(CCR0, object_has_monitor);
  1.1942 +  }
  1.1943 +
  1.1944 +  // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
  1.1945 +  ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
  1.1946 +
  1.1947 +  // displaced_header now holds the compare value for the cmpxchg below.
  1.1948 +
  1.1949 +  // Initialize the box. (Must happen before we update the object mark!)
  1.1950 +  std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
  1.1951 +
  1.1952 +  // Must fence, otherwise preceding store(s) may float below cmpxchg.
  1.1953 +  // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
  1.1954 +  // CmpxchgX sets cr_reg to cmpX(current, displaced).
  1.1955 +  cmpxchgd(/*flag=*/flag,
  1.1956 +           /*current_value=*/current_header,
  1.1957 +           /*compare_value=*/displaced_header,
  1.1958 +           /*exchange_value=*/box,
  1.1959 +           /*where=*/oop,
  1.1960 +           MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
  1.1961 +           MacroAssembler::cmpxchgx_hint_acquire_lock(),
  1.1962 +           noreg,
  1.1963 +           &cas_failed);
  1.1964 +  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1.1965 +
  1.1966 +  // If the compare-and-exchange succeeded, then we found an unlocked
  1.1967 +  // object and we have now locked it.
  1.1968 +  b(cont);
  1.1969 +
  1.1970 +  bind(cas_failed);
  1.1971 +  // We did not see an unlocked object so try the fast recursive case.
  1.1972 +
  1.1973 +  // Check if the owner is self by comparing the value in the markOop of object
  1.1974 +  // (current_header) with the stack pointer.
  1.1975 +  sub(current_header, current_header, R1_SP);
  1.1976 +  load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
  1.1977 +                                        markOopDesc::lock_mask_in_place));
  1.1978 +
  1.1979 +  and_(R0/*==0?*/, current_header, temp);
  1.1980 +  // If the condition is true we are done (flag == EQ) and can store 0 as the
  1.1981 +  // displaced header in the box, which indicates that it is a recursive lock.
  1.1982 +  mcrf(flag, CCR0);
  1.1983 +  std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
  1.1984 +
  1.1985 +  // Handle existing monitor.
  1.1986 +  if ((EmitSync & 0x02) == 0) {
  1.1987 +    b(cont);
  1.1988 +
  1.1989 +    bind(object_has_monitor);
  1.1990 +    // The object's monitor m is unlocked iff m->owner == NULL,
  1.1991 +    // otherwise m->owner may contain a thread or a stack address.
  1.1992 +    //
  1.1993 +    // Try to CAS m->owner from NULL to current thread.
  1.1994 +    addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
  1.1995 +    li(displaced_header, 0);
  1.1996 +    // CmpxchgX sets flag to cmpX(current, displaced).
  1.1997 +    cmpxchgd(/*flag=*/flag,
  1.1998 +             /*current_value=*/current_header,
  1.1999 +             /*compare_value=*/displaced_header,
  1.2000 +             /*exchange_value=*/R16_thread,
  1.2001 +             /*where=*/temp,
  1.2002 +             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
  1.2003 +             MacroAssembler::cmpxchgx_hint_acquire_lock());
  1.2004 +
  1.2005 +    // Store a non-null value into the box.
  1.2006 +    std(box, BasicLock::displaced_header_offset_in_bytes(), box);
  1.2007 +
  1.2008 +#   ifdef ASSERT
  1.2009 +    bne(flag, cont);
  1.2010 +    // We have acquired the monitor, check some invariants.
  1.2011 +    addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
  1.2012 +    // Invariant 1: _recursions should be 0.
  1.2013 +    //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
  1.2014 +    asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
  1.2015 +                            "monitor->_recursions should be 0", -1);
  1.2016 +    // Invariant 2: OwnerIsThread shouldn't be 0.
  1.2017 +    //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
  1.2018 +    //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
  1.2019 +    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
  1.2020 +#   endif
  1.2021 +  }
  1.2022 +
  1.2023 +  bind(cont);
  1.2024 +  // flag == EQ indicates success
  1.2025 +  // flag == NE indicates failure
  1.2026 +}
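// The recursive-lock test above computes, in C++ terms (sketch):
//
//   bool recursive = ((mark - (uintptr_t)SP)
//                     & (~((uintptr_t)os::vm_page_size() - 1)
//                        | markOopDesc::lock_mask_in_place)) == 0;
//
// i.e. the mark word is a stack address no more than one page above SP with
// clear lock bits -- the lock is already held by this thread, so storing a
// zero displaced header into the box marks the recursive case.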
  1.2027 +
  1.2028 +void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
  1.2029 +                                                 Register temp, Register displaced_header, Register current_header) {
  1.2030 +  assert_different_registers(oop, box, temp, displaced_header, current_header);
  1.2031 +  assert(flag != CCR0, "bad condition register");
  1.2032 +  Label cont;
  1.2033 +  Label object_has_monitor;
  1.2034 +
  1.2035 +  // Always do locking in runtime.
  1.2036 +  if (EmitSync & 0x01) {
  1.2037 +    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
  1.2038 +    return;
  1.2039 +  }
  1.2040 +
  1.2041 +  if (UseBiasedLocking) {
  1.2042 +    biased_locking_exit(flag, oop, current_header, cont);
  1.2043 +  }
  1.2044 +
  1.2045 +  // Find the lock address and load the displaced header from the stack.
  1.2046 +  ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
  1.2047 +
  1.2048 +  // If the displaced header is 0, we have a recursive unlock.
  1.2049 +  cmpdi(flag, displaced_header, 0);
  1.2050 +  beq(flag, cont);
  1.2051 +
  1.2052 +  // Handle existing monitor.
  1.2053 +  if ((EmitSync & 0x02) == 0) {
  1.2054 +    // The object has an existing monitor iff (mark & monitor_value) != 0.
  1.2055 +    ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
  1.2056 +    andi(temp, current_header, markOopDesc::monitor_value);
  1.2057 +    cmpdi(flag, temp, 0);
  1.2058 +    bne(flag, object_has_monitor);
  1.2059 +  }
  1.2060 +
  1.2062 +  // Check if it is still a lightweight lock; this is true if we see
  1.2063 +  // the stack address of the basicLock in the markOop of the object.
  1.2064 +  // Cmpxchg sets flag to cmpd(current_header, box).
  1.2065 +  cmpxchgd(/*flag=*/flag,
  1.2066 +           /*current_value=*/current_header,
  1.2067 +           /*compare_value=*/box,
  1.2068 +           /*exchange_value=*/displaced_header,
  1.2069 +           /*where=*/oop,
  1.2070 +           MacroAssembler::MemBarRel,
  1.2071 +           MacroAssembler::cmpxchgx_hint_release_lock(),
  1.2072 +           noreg,
  1.2073 +           &cont);
  1.2074 +
  1.2075 +  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1.2076 +
  1.2077 +  // Handle existing monitor.
  1.2078 +  if ((EmitSync & 0x02) == 0) {
  1.2079 +    b(cont);
  1.2080 +
  1.2081 +    bind(object_has_monitor);
  1.2082 +    addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
  1.2083 +    ld(temp,             ObjectMonitor::owner_offset_in_bytes(), current_header);
  1.2084 +    ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
  1.2085 +    xorr(temp, R16_thread, temp);      // Will be 0 if we are the owner.
  1.2086 +    orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
  1.2087 +    cmpdi(flag, temp, 0);
  1.2088 +    bne(flag, cont);
  1.2089 +
  1.2090 +    ld(temp,             ObjectMonitor::EntryList_offset_in_bytes(), current_header);
  1.2091 +    ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
  1.2092 +    orr(temp, temp, displaced_header); // Will be 0 if both are 0.
  1.2093 +    cmpdi(flag, temp, 0);
  1.2094 +    bne(flag, cont);
  1.2095 +    release();
  1.2096 +    std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
  1.2097 +  }
  1.2098 +
  1.2099 +  bind(cont);
  1.2100 +  // flag == EQ indicates success
  1.2101 +  // flag == NE indicates failure
  1.2102 +}
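// Monitor fast-exit preconditions checked above, in C++ terms (sketch):
//
//   if (m->owner != self || m->recursions != 0) goto cont;   // flag == NE: runtime
//   if (m->EntryList != NULL || m->cxq != NULL) goto cont;   // waiters: runtime
//   release_store(&m->owner, NULL);                          // flag == EQ: unlocked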
  1.2103 +
  1.2104 +// Write serialization page so VM thread can do a pseudo remote membar.
  1.2105 +// We use the current thread pointer to calculate a thread specific
  1.2106 +// offset to write to within the page. This minimizes bus traffic
  1.2107 +// due to cache line collision.
  1.2108 +void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  1.2109 +  srdi(tmp2, thread, os::get_serialize_page_shift_count());
  1.2110 +
  1.2111 +  int mask = os::vm_page_size() - sizeof(int);
  1.2112 +  if (Assembler::is_simm(mask, 16)) {
  1.2113 +    andi(tmp2, tmp2, mask);
  1.2114 +  } else {
  1.2115 +    lis(tmp1, (int)((signed short) (mask >> 16)));
  1.2116 +    ori(tmp1, tmp1, mask & 0x0000ffff);
  1.2117 +    andr(tmp2, tmp2, tmp1);
  1.2118 +  }
  1.2119 +
  1.2120 +  load_const(tmp1, (long) os::get_memory_serialize_page());
  1.2121 +  release();
  1.2122 +  stwx(R0, tmp1, tmp2);
  1.2123 +}
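// Address computation performed above, in C++ terms (sketch):
//
//   uintptr_t off = ((uintptr_t)thread >> os::get_serialize_page_shift_count())
//                   & (os::vm_page_size() - sizeof(int));
//   *(volatile int*)(os::get_memory_serialize_page() + off) = 0;   // the stwx
//
// Each thread hashes to its own word-aligned slot in the page, so concurrent
// threads rarely touch the same cache line.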
  1.2124 +
  1.2125 +
  1.2126 +// GC barrier helper macros
  1.2127 +
  1.2128 +// Write the card table byte if needed.
  1.2129 +void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
  1.2130 +  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
  1.2131 +  assert(bs->kind() == BarrierSet::CardTableModRef ||
  1.2132 +         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  1.2133 +#ifdef ASSERT
  1.2134 +  cmpdi(CCR0, Rnew_val, 0);
  1.2135 +  asm_assert_ne("null oop not allowed", 0x321);
  1.2136 +#endif
  1.2137 +  card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
  1.2138 +}
  1.2139 +
  1.2140 +// Write the card table byte.
  1.2141 +void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
  1.2142 +  assert_different_registers(Robj, Rtmp, R0);
  1.2143 +  load_const_optimized(Rtmp, (address)byte_map_base, R0);
  1.2144 +  srdi(Robj, Robj, CardTableModRefBS::card_shift);
  1.2145 +  li(R0, 0); // dirty
  1.2146 +  if (UseConcMarkSweepGC) release();
  1.2147 +  stbx(R0, Rtmp, Robj);
  1.2148 +}
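// Equivalent C++ for the card mark (sketch):
//
//   byte_map_base[(uintptr_t)obj >> CardTableModRefBS::card_shift] = 0;  // dirty
//
// With CMS, the release() above orders the covered oop store before the card
// store, so a concurrent collector never sees a dirty card ahead of its oop.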
  1.2149 +
  1.2150 +#if INCLUDE_ALL_GCS
  1.2151 +
  1.2152 +// General G1 pre-barrier generator.
  1.2153 +// Goal: record the previous value if it is not null.
  1.2154 +void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
  1.2155 +                                          Register Rtmp1, Register Rtmp2, bool needs_frame) {
  1.2156 +  Label runtime, filtered;
  1.2157 +
  1.2158 +  // Is marking active?
  1.2159 +  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
  1.2160 +    lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  1.2161 +  } else {
  1.2162 +    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
  1.2163 +    lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  1.2164 +  }
  1.2165 +  cmpdi(CCR0, Rtmp1, 0);
  1.2166 +  beq(CCR0, filtered);
  1.2167 +
  1.2168 +  // Do we need to load the previous value?
  1.2169 +  if (Robj != noreg) {
  1.2170 +    // Load the previous value...
  1.2171 +    if (UseCompressedOops) {
  1.2172 +      lwz(Rpre_val, offset, Robj);
  1.2173 +    } else {
  1.2174 +      ld(Rpre_val, offset, Robj);
  1.2175 +    }
  1.2176 +    // Previous value has been loaded into Rpre_val.
  1.2177 +  }
  1.2178 +  assert(Rpre_val != noreg, "must have a real register");
  1.2179 +
  1.2180 +  // Is the previous value null?
  1.2181 +  cmpdi(CCR0, Rpre_val, 0);
  1.2182 +  beq(CCR0, filtered);
  1.2183 +
  1.2184 +  if (Robj != noreg && UseCompressedOops) {
  1.2185 +    decode_heap_oop_not_null(Rpre_val);
  1.2186 +  }
  1.2187 +
  1.2188 +  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  1.2189 +  // case pre_val is in a scratch register; if no frame is available at this
  1.2190 +  // point (needs_frame), LR/CR are saved and a frame is pushed around the
  1.2191 +  // runtime call below.
  1.2192 +
  1.2193 +  // Can we store original value in the thread's buffer?
  1.2194 +  // Is index == 0?
  1.2195 +  // (The index field is typed as size_t.)
  1.2196 +  const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  1.2197 +
  1.2198 +  ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  1.2199 +  cmpdi(CCR0, Rindex, 0);
  1.2200 +  beq(CCR0, runtime); // If index == 0, goto runtime.
  1.2201 +  ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
  1.2202 +
  1.2203 +  addi(Rindex, Rindex, -wordSize); // Decrement index.
  1.2204 +  std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  1.2205 +
  1.2206 +  // Record the previous value.
  1.2207 +  stdx(Rpre_val, Rbuffer, Rindex);
  1.2208 +  b(filtered);
  1.2209 +
  1.2210 +  bind(runtime);
  1.2211 +
  1.2212 +  // The VM call needs a frame so it can save and write registers.
  1.2213 +  if (needs_frame) {
  1.2214 +    save_LR_CR(Rtmp1);
  1.2215 +    push_frame_abi112(0, Rtmp2);
  1.2216 +  }
  1.2217 +
  1.2218 +  if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
  1.2219 +  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
  1.2220 +  if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
  1.2221 +
  1.2222 +  if (needs_frame) {
  1.2223 +    pop_frame();
  1.2224 +    restore_LR_CR(Rtmp1);
  1.2225 +  }
  1.2226 +
  1.2227 +  bind(filtered);
  1.2228 +}
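// SATB pre-barrier logic above, in C++ terms (sketch; PtrQueue members named
// after the byte_offset_of_* accessors used above):
//
//   if (!satb_queue._active) return;                    // filtered
//   oop pre_val = (Robj != noreg) ? Robj->field : Rpre_val;
//   if (pre_val == NULL) return;                        // filtered
//   if (satb_queue._index == 0) {
//     SharedRuntime::g1_wb_pre(pre_val, thread);        // runtime slow path
//   } else {
//     satb_queue._index -= wordSize;
//     *(oop*)(satb_queue._buf + satb_queue._index) = pre_val;
//   }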
  1.2229 +
  1.2230 +// General G1 post-barrier generator
  1.2231 +// Store cross-region card.
  1.2232 +void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
  1.2233 +  Label runtime, filtered_int;
  1.2234 +  Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
  1.2235 +  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
  1.2236 +
  1.2237 +  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
  1.2238 +  assert(bs->kind() == BarrierSet::G1SATBCT ||
  1.2239 +         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
  1.2240 +
  1.2241 +  // Does store cross heap regions?
  1.2242 +  if (G1RSBarrierRegionFilter) {
  1.2243 +    xorr(Rtmp1, Rstore_addr, Rnew_val);
  1.2244 +    srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  1.2245 +    beq(CCR0, filtered);
  1.2246 +  }
  1.2247 +
  1.2248 +  // Crosses regions, storing NULL?
  1.2249 +#ifdef ASSERT
  1.2250 +  cmpdi(CCR0, Rnew_val, 0);
  1.2251 +  asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
  1.2252 +  //beq(CCR0, filtered);
  1.2253 +#endif
  1.2254 +
  1.2255 +  // Storing region crossing non-NULL, is card already dirty?
  1.2256 +  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
  1.2257 +  const Register Rcard_addr = Rtmp1;
  1.2258 +  Register Rbase = Rtmp2;
  1.2259 +  load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
  1.2260 +
  1.2261 +  srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
  1.2262 +
  1.2263 +  // Load the value of the card.
  1.2264 +  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
  1.2265 +
  1.2266 +  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  1.2267 +  cmpwi(CCR0, Rtmp3 /* card value */, 0);
  1.2268 +  beq(CCR0, filtered);
  1.2269 +
  1.2270 +  // Storing a region crossing, non-NULL oop, card is clean.
  1.2271 +  // Dirty card and log.
  1.2272 +  li(Rtmp3, 0); // dirty
  1.2273 +  //release(); // G1: oops are allowed to get visible after dirty marking.
  1.2274 +  stbx(Rtmp3, Rbase, Rcard_addr);
  1.2275 +
  1.2276 +  add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  1.2277 +  Rbase = noreg; // end of lifetime
  1.2278 +
  1.2279 +  const Register Rqueue_index = Rtmp2,
  1.2280 +                 Rqueue_buf   = Rtmp3;
  1.2281 +  ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  1.2282 +  cmpdi(CCR0, Rqueue_index, 0);
  1.2283 +  beq(CCR0, runtime); // index == 0 then jump to runtime
  1.2284 +  ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
  1.2285 +
  1.2286 +  addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
  1.2287 +  std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  1.2288 +
  1.2289 +  stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
  1.2290 +  b(filtered);
  1.2291 +
  1.2292 +  bind(runtime);
  1.2293 +
  1.2294 +  // Save the live input values.
  1.2295 +  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
  1.2296 +
  1.2297 +  bind(filtered_int);
  1.2298 +}
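// The region filter above exploits the 2^LogOfHRGrainBytes alignment of heap
// regions: ((store_addr ^ new_val) >> HeapRegion::LogOfHRGrainBytes) == 0
// exactly when both addresses lie in the same region, in which case no
// remembered-set update is needed and the barrier exits early.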
  1.2299 +#endif // INCLUDE_ALL_GCS
  1.2300 +
  1.2301 +// Values for last_Java_pc and last_Java_sp must comply with the rules
  1.2302 +// in frame_ppc64.hpp.
  1.2303 +void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
  1.2304 +  // Always set last_Java_pc and flags first because once last_Java_sp
  1.2305 +  // is visible, has_last_Java_frame is true and users will look at the
  1.2306 +  // rest of the fields. (Note: the flags should always be zero before we
  1.2307 +  // get here, so they don't need to be set.)
  1.2308 +
  1.2309 +  // Verify that last_Java_pc was zeroed on return to Java
  1.2310 +  asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
  1.2311 +                          "last_Java_pc not zeroed before leaving Java", 0x200);
  1.2312 +
  1.2313 +  // When returning from calling out from Java mode the frame anchor's
  1.2314 +  // last_Java_pc will always be set to NULL. It is set here so that
  1.2315 +  // if we are doing a call to native (not VM) that we capture the
  1.2316 +  // known pc and don't have to rely on the native call having a
  1.2317 +  // standard frame linkage where we can find the pc.
  1.2318 +  if (last_Java_pc != noreg)
  1.2319 +    std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
  1.2320 +
  1.2321 +  // set last_Java_sp last
  1.2322 +  std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
  1.2323 +}
  1.2324 +
  1.2325 +void MacroAssembler::reset_last_Java_frame(void) {
  1.2326 +  asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
  1.2327 +                             R16_thread, "SP was not set, still zero", 0x202);
  1.2328 +
  1.2329 +  BLOCK_COMMENT("reset_last_Java_frame {");
  1.2330 +  li(R0, 0);
  1.2331 +
  1.2332 +  // _last_Java_sp = 0
  1.2333 +  std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
  1.2334 +
  1.2335 +  // _last_Java_pc = 0
  1.2336 +  std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
  1.2337 +  BLOCK_COMMENT("} reset_last_Java_frame");
  1.2338 +}
  1.2339 +
  1.2340 +void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
  1.2341 +  assert_different_registers(sp, tmp1);
  1.2342 +
  1.2343 +  // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
  1.2344 +  // TOP_IJAVA_FRAME_ABI.
  1.2345 +  // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
  1.2346 +#ifdef CC_INTERP
  1.2347 +  ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
  1.2348 +#else
  1.2349 +  Unimplemented();
  1.2350 +#endif
  1.2351 +
  1.2352 +  set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
  1.2353 +}
  1.2354 +
  1.2355 +void MacroAssembler::get_vm_result(Register oop_result) {
  1.2356 +  // Read:
  1.2357 +  //   R16_thread
  1.2358 +  //   R16_thread->in_bytes(JavaThread::vm_result_offset())
  1.2359 +  //
  1.2360 +  // Updated:
  1.2361 +  //   oop_result
  1.2362 +  //   R16_thread->in_bytes(JavaThread::vm_result_offset())
  1.2363 +
  1.2364 +  ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  1.2365 +  li(R0, 0);
  1.2366 +  std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  1.2367 +
  1.2368 +  verify_oop(oop_result);
  1.2369 +}
  1.2370 +
  1.2371 +void MacroAssembler::get_vm_result_2(Register metadata_result) {
  1.2372 +  // Read:
  1.2373 +  //   R16_thread
  1.2374 +  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
  1.2375 +  //
  1.2376 +  // Updated:
  1.2377 +  //   metadata_result
  1.2378 +  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
  1.2379 +
  1.2380 +  ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
  1.2381 +  li(R0, 0);
  1.2382 +  std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
  1.2383 +}
  1.2384 +
  1.2385 +
  1.2386 +void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  1.2387 +  if (src == noreg) src = dst;
  1.2388 +  if (Universe::narrow_klass_base() != NULL) {
  1.2389 +    // heapbased
  1.2390 +    assert(Universe::narrow_klass_shift() != 0, "sanity");
  1.2391 +    sub(dst, src, R30);
  1.2392 +    srdi(dst, dst, Universe::narrow_klass_shift());
  1.2393 +  } else if (Universe::narrow_klass_shift() != 0) {
  1.2394 +    // zerobased
  1.2395 +    srdi(dst, src, Universe::narrow_klass_shift());
  1.2396 +  } else if (src != dst) {
  1.2397 +    // unscaled
  1.2398 +    mr(dst, src);
  1.2399 +  }
  1.2400 +}
  1.2401 +
  1.2402 +void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
  1.2403 +  if (UseCompressedKlassPointers) {
  1.2404 +    encode_klass_not_null(ck, klass);
  1.2405 +    stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
  1.2406 +  } else {
  1.2407 +    std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
  1.2408 +  }
  1.2409 +}
  1.2410 +
  1.2411 +void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  1.2412 +  if (src == noreg) src = dst;
  1.2413 +  if (Universe::narrow_klass_base() != NULL) {
  1.2414 +    // heapbased
  1.2415 +    assert(Universe::narrow_klass_shift() != 0, "sanity");
  1.2416 +    sldi(dst, src, Universe::narrow_klass_shift());
  1.2417 +    add(dst, dst, R30);
  1.2418 +  } else if (Universe::narrow_klass_shift() != 0) {
  1.2419 +    // zerobased
  1.2420 +    sldi(dst, src, Universe::narrow_klass_shift());
  1.2421 +  } else if (src != dst) {
  1.2422 +    // unscaled
  1.2423 +    mr(dst, src);
  1.2424 +  }
  1.2425 +}
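          +
          +// Reference arithmetic for the three compression modes handled by
          +// encode/decode_klass_not_null above; at runtime the base lives in
          +// R30 (see reinit_heapbase below). Illustration only, not compiled:
          +#if 0
          +static uint32_t encode_ref(uintptr_t klass, uintptr_t base, int shift) {
          +  if (base != 0)  return (uint32_t)((klass - base) >> shift); // heapbased
          +  if (shift != 0) return (uint32_t)(klass >> shift);          // zerobased
          +  return (uint32_t)klass;                                     // unscaled
          +}
          +static uintptr_t decode_ref(uint32_t narrow, uintptr_t base, int shift) {
          +  if (base != 0)  return ((uintptr_t)narrow << shift) + base; // heapbased
          +  if (shift != 0) return (uintptr_t)narrow << shift;          // zerobased
          +  return (uintptr_t)narrow;                                   // unscaled
          +}
          +#endif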
  1.2426 +
  1.2427 +void MacroAssembler::load_klass(Register dst, Register src) {
  1.2428 +  if (UseCompressedKlassPointers) {
  1.2429 +    lwz(dst, oopDesc::klass_offset_in_bytes(), src);
  1.2430 +    // Attention: no null check here!
  1.2431 +    decode_klass_not_null(dst, dst);
  1.2432 +  } else {
  1.2433 +    ld(dst, oopDesc::klass_offset_in_bytes(), src);
  1.2434 +  }
  1.2435 +}
  1.2436 +
  1.2437 +void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
          +  // An explicit null check is only needed if the zero page is not
          +  // read-protected (currently everywhere except Linux).
   1.2438 +  if (false NOT_LINUX(|| true) /*!os::zero_page_read_protected()*/) {
  1.2439 +    if (TrapBasedNullChecks) {
  1.2440 +      trap_null_check(src);
  1.2441 +    }
  1.2442 +  }
  1.2443 +  load_klass(dst, src);
  1.2444 +}
  1.2445 +
  1.2446 +void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
  1.2447 +  if (UseCompressedOops || UseCompressedKlassPointers) {
  1.2448 +    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
  1.2449 +    ld(R30, 0, R30);
  1.2450 +  }
  1.2451 +}
  1.2452 +
  1.2453 +/////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
  1.2454 +
   1.2455 +// Search for a single jchar in a jchar[].
  1.2456 +//
  1.2457 +// Assumes that result differs from all other registers.
  1.2458 +//
  1.2459 +// Haystack, needle are the addresses of jchar-arrays.
  1.2460 +// NeedleChar is needle[0] if it is known at compile time.
   1.2461 +// Haycnt is the length of the haystack. We assume haycnt >= 1.
  1.2462 +//
  1.2463 +// Preserves haystack, haycnt, kills all other registers.
  1.2464 +//
  1.2465 +// If needle == R0, we search for the constant needleChar.
  1.2466 +void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
  1.2467 +                                      Register needle, jchar needleChar,
  1.2468 +                                      Register tmp1, Register tmp2) {
  1.2469 +
  1.2470 +  assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
  1.2471 +
  1.2472 +  Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
  1.2473 +  Register needle0 = needle, // Contains needle[0].
  1.2474 +           addr = tmp1,
  1.2475 +           ch1 = tmp2,
  1.2476 +           ch2 = R0;
  1.2477 +
  1.2478 +//2 (variable) or 3 (const):
  1.2479 +   if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
  1.2480 +   dcbtct(haystack, 0x00);                        // Indicate R/O access to haystack.
  1.2481 +
  1.2482 +   srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
  1.2483 +   mr(addr, haystack);
  1.2484 +   beq(CCR0, L_FinalCheck);
  1.2485 +   mtctr(tmp2);              // Move to count register.
  1.2486 +//8:
  1.2487 +  bind(L_InnerLoop);             // Main work horse (2x unrolled search loop).
  1.2488 +   lhz(ch1, 0, addr);        // Load characters from haystack.
  1.2489 +   lhz(ch2, 2, addr);
  1.2490 +   (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
  1.2491 +   (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
  1.2492 +   beq(CCR0, L_Found1);   // Did we find the needle?
  1.2493 +   beq(CCR1, L_Found2);
  1.2494 +   addi(addr, addr, 4);
  1.2495 +   bdnz(L_InnerLoop);
  1.2496 +//16:
  1.2497 +  bind(L_FinalCheck);
  1.2498 +   andi_(R0, haycnt, 1);
  1.2499 +   beq(CCR0, L_NotFound);
  1.2500 +   lhz(ch1, 0, addr);        // One position left at which we have to compare.
  1.2501 +   (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
  1.2502 +   beq(CCR1, L_Found3);
  1.2503 +//21:
  1.2504 +  bind(L_NotFound);
  1.2505 +   li(result, -1);           // Not found.
  1.2506 +   b(L_End);
  1.2507 +
  1.2508 +  bind(L_Found2);
  1.2509 +   addi(addr, addr, 2);
  1.2510 +//24:
  1.2511 +  bind(L_Found1);
  1.2512 +  bind(L_Found3);                  // Return index ...
  1.2513 +   subf(addr, haystack, addr); // relative to haystack,
  1.2514 +   srdi(result, addr, 1);      // in characters.
  1.2515 +  bind(L_End);
  1.2516 +}
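          +
          +// Reference semantics of string_indexof_1 (illustration only, not
          +// compiled; the helper name is a sketch-only stand-in). The emitted
          +// code is this loop 2x unrolled, with a CTR-based trip count:
          +#if 0
          +static int string_indexof_1_ref(const jchar* haystack, int haycnt, jchar ch) {
          +  for (int i = 0; i < haycnt; ++i) {
          +    if (haystack[i] == ch) return i;
          +  }
          +  return -1;
          +}
          +#endif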
  1.2517 +
  1.2518 +
  1.2519 +// Implementation of IndexOf for jchar arrays.
  1.2520 +//
   1.2521 +// The lengths of haystack and needle are not constant, i.e. they are passed in registers.
  1.2522 +//
  1.2523 +// Preserves registers haystack, needle.
  1.2524 +// Kills registers haycnt, needlecnt.
  1.2525 +// Assumes that result differs from all other registers.
  1.2526 +// Haystack, needle are the addresses of jchar-arrays.
  1.2527 +// Haycnt, needlecnt are the lengths of them, respectively.
  1.2528 +//
   1.2529 +// Needlecntval must be zero or a 15-bit unsigned immediate greater than 1.
  1.2530 +void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
  1.2531 +                                    Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
  1.2532 +                                    Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  1.2533 +
  1.2534 +  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
  1.2535 +  Label L_TooShort, L_Found, L_NotFound, L_End;
  1.2536 +  Register last_addr = haycnt, // Kill haycnt at the beginning.
  1.2537 +           addr      = tmp1,
  1.2538 +           n_start   = tmp2,
  1.2539 +           ch1       = tmp3,
  1.2540 +           ch2       = R0;
  1.2541 +
  1.2542 +  // **************************************************************************************************
  1.2543 +  // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
  1.2544 +  // **************************************************************************************************
  1.2545 +
  1.2546 +//1 (variable) or 3 (const):
   1.2547 +   dcbtct(needle, 0x00);    // Indicate R/O access to needle (str1).
   1.2548 +   dcbtct(haystack, 0x00);  // Indicate R/O access to haystack (str2).
  1.2549 +
  1.2550 +  // Compute last haystack addr to use if no match gets found.
  1.2551 +  if (needlecntval == 0) { // variable needlecnt
  1.2552 +//3:
  1.2553 +   subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
  1.2554 +   addi(addr, haystack, -2);          // Accesses use pre-increment.
  1.2555 +   cmpwi(CCR6, needlecnt, 2);
  1.2556 +   blt(CCR6, L_TooShort);          // Variable needlecnt: handle short needle separately.
  1.2557 +   slwi(ch1, ch1, 1);                 // Scale to number of bytes.
  1.2558 +   lwz(n_start, 0, needle);           // Load first 2 characters of needle.
  1.2559 +   add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
  1.2560 +   addi(needlecnt, needlecnt, -2);    // Rest of needle.
  1.2561 +  } else { // constant needlecnt
   1.2562 +   guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
   1.2563 +   assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
  1.2564 +//5:
  1.2565 +   addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
  1.2566 +   lwz(n_start, 0, needle);           // Load first 2 characters of needle.
  1.2567 +   addi(addr, haystack, -2);          // Accesses use pre-increment.
  1.2568 +   slwi(ch1, ch1, 1);                 // Scale to number of bytes.
  1.2569 +   add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
  1.2570 +   li(needlecnt, needlecntval-2);     // Rest of needle.
  1.2571 +  }
  1.2572 +
  1.2573 +  // Main Loop (now we have at least 3 characters).
  1.2574 +//11:
  1.2575 +  Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
  1.2576 +  bind(L_OuterLoop); // Search for 1st 2 characters.
  1.2577 +  Register addr_diff = tmp4;
  1.2578 +   subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
  1.2579 +   addi(addr, addr, 2);              // This is the new address we want to use for comparing.
  1.2580 +   srdi_(ch2, addr_diff, 2);
  1.2581 +   beq(CCR0, L_FinalCheck);       // 2 characters left?
  1.2582 +   mtctr(ch2);                       // addr_diff/4
  1.2583 +//16:
  1.2584 +  bind(L_InnerLoop);                // Main work horse (2x unrolled search loop)
  1.2585 +   lwz(ch1, 0, addr);           // Load 2 characters of haystack (ignore alignment).
  1.2586 +   lwz(ch2, 2, addr);
  1.2587 +   cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
  1.2588 +   cmpw(CCR1, ch2, n_start);
  1.2589 +   beq(CCR0, L_Comp1);       // Did we find the needle start?
  1.2590 +   beq(CCR1, L_Comp2);
  1.2591 +   addi(addr, addr, 4);
  1.2592 +   bdnz(L_InnerLoop);
  1.2593 +//24:
  1.2594 +  bind(L_FinalCheck);
  1.2595 +   rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
  1.2596 +   beq(CCR0, L_NotFound);
  1.2597 +   lwz(ch1, 0, addr);                       // One position left at which we have to compare.
  1.2598 +   cmpw(CCR1, ch1, n_start);
  1.2599 +   beq(CCR1, L_Comp3);
  1.2600 +//29:
  1.2601 +  bind(L_NotFound);
  1.2602 +   li(result, -1); // not found
  1.2603 +   b(L_End);
  1.2604 +
  1.2605 +
  1.2606 +   // **************************************************************************************************
  1.2607 +   // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
  1.2608 +   // **************************************************************************************************
  1.2609 +//31:
   1.2610 + if ((needlecntval >> 1) != 1) { // Skip this block if const needlecnt is 2 or 3 (reduces code size).
   1.2611 +  int nopcnt = 5;
   1.2612 +  if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
  1.2613 +  if (needlecntval == 0) {         // We have to handle these cases separately.
  1.2614 +  Label L_OneCharLoop;
  1.2615 +  bind(L_TooShort);
  1.2616 +   mtctr(haycnt);
  1.2617 +   lhz(n_start, 0, needle);    // First character of needle
  1.2618 +  bind(L_OneCharLoop);
  1.2619 +   lhzu(ch1, 2, addr);
  1.2620 +   cmpw(CCR1, ch1, n_start);
  1.2621 +   beq(CCR1, L_Found);      // Did we find the one character needle?
  1.2622 +   bdnz(L_OneCharLoop);
  1.2623 +   li(result, -1);             // Not found.
  1.2624 +   b(L_End);
  1.2625 +  } // 8 instructions, so no impact on alignment.
  1.2626 +  for (int x = 0; x < nopcnt; ++x) nop();
  1.2627 + }
  1.2628 +
  1.2629 +  // **************************************************************************************************
  1.2630 +  // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
  1.2631 +  // **************************************************************************************************
  1.2632 +
  1.2633 +  // Compare the rest
  1.2634 +//36 if needlecntval==0, else 37:
  1.2635 +  bind(L_Comp2);
  1.2636 +   addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
  1.2637 +  bind(L_Comp1);            // Addr points to possible needle start.
   1.2638 +  bind(L_Comp3);            // Could have created a copy and used a different return address, but we save code size here.
  1.2639 +  if (needlecntval != 2) {  // Const needlecnt==2?
  1.2640 +   if (needlecntval != 3) {
  1.2641 +    if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
  1.2642 +    Register ind_reg = tmp4;
  1.2643 +    li(ind_reg, 2*2);   // First 2 characters are already compared, use index 2.
  1.2644 +    mtctr(needlecnt);   // Decremented by 2, still > 0.
  1.2645 +//40:
  1.2646 +   Label L_CompLoop;
  1.2647 +   bind(L_CompLoop);
  1.2648 +    lhzx(ch2, needle, ind_reg);
  1.2649 +    lhzx(ch1, addr, ind_reg);
  1.2650 +    cmpw(CCR1, ch1, ch2);
  1.2651 +    bne(CCR1, L_OuterLoop);
  1.2652 +    addi(ind_reg, ind_reg, 2);
  1.2653 +    bdnz(L_CompLoop);
  1.2654 +   } else { // No loop required if there's only one needle character left.
  1.2655 +    lhz(ch2, 2*2, needle);
  1.2656 +    lhz(ch1, 2*2, addr);
  1.2657 +    cmpw(CCR1, ch1, ch2);
  1.2658 +    bne(CCR1, L_OuterLoop);
  1.2659 +   }
  1.2660 +  }
  1.2661 +  // Return index ...
  1.2662 +//46:
  1.2663 +  bind(L_Found);
  1.2664 +   subf(addr, haystack, addr); // relative to haystack, ...
  1.2665 +   srdi(result, addr, 1);      // in characters.
  1.2666 +//48:
  1.2667 +  bind(L_End);
  1.2668 +}
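          +
          +// Reference semantics of string_indexof (illustration only, not
          +// compiled). The emitted code matches the first two needle
          +// characters in a 2x unrolled scan, then compares the remainder:
          +#if 0
          +static int string_indexof_ref(const jchar* haystack, int haycnt,
          +                              const jchar* needle, int needlecnt) {
          +  for (int i = 0; i + needlecnt <= haycnt; ++i) {
          +    int j = 0;
          +    while (j < needlecnt && haystack[i + j] == needle[j]) ++j;
          +    if (j == needlecnt) return i;
          +  }
          +  return -1;
          +}
          +#endif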
  1.2669 +
  1.2670 +// Implementation of Compare for jchar arrays.
  1.2671 +//
  1.2672 +// Kills the registers str1, str2, cnt1, cnt2.
  1.2673 +// Kills cr0, ctr.
   1.2674 +// Assumes that result differs from the input registers.
  1.2675 +void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
  1.2676 +                                    Register result_reg, Register tmp_reg) {
  1.2677 +   assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
  1.2678 +
  1.2679 +   Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
  1.2680 +   Register cnt_diff = R0,
  1.2681 +            limit_reg = cnt1_reg,
  1.2682 +            chr1_reg = result_reg,
  1.2683 +            chr2_reg = cnt2_reg,
  1.2684 +            addr_diff = str2_reg;
  1.2685 +
  1.2686 +   // Offset 0 should be 32 byte aligned.
  1.2687 +//-4:
  1.2688 +    dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  1.2689 +    dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  1.2690 +//-2:
  1.2691 +   // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
  1.2692 +    subf(result_reg, cnt2_reg, cnt1_reg);  // difference between cnt1/2
  1.2693 +    subf_(addr_diff, str1_reg, str2_reg);  // alias?
   1.2694 +    beq(CCR0, Ldone);                   // return cnt difference if strings alias (same address)
  1.2695 +    srawi(limit_reg, result_reg, 31);      // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
  1.2696 +    mr(cnt_diff, result_reg);
  1.2697 +    andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
  1.2698 +    add_(limit_reg, cnt2_reg, limit_reg);  // min(cnt1, cnt2)==0?
  1.2699 +    beq(CCR0, Ldone);                   // return cnt difference if one has 0 length
  1.2700 +
  1.2701 +    lhz(chr1_reg, 0, str1_reg);            // optional: early out if first characters mismatch
  1.2702 +    lhzx(chr2_reg, str1_reg, addr_diff);   // optional: early out if first characters mismatch
  1.2703 +    addi(tmp_reg, limit_reg, -1);          // min(cnt1, cnt2)-1
  1.2704 +    subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
  1.2705 +    bne(CCR0, Ldone);                   // optional: early out if first characters mismatch
  1.2706 +
  1.2707 +   // Set loop counter by scaling down tmp_reg
  1.2708 +    srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
  1.2709 +    ble(CCR0, Lslow_case);                 // need >4 characters for fast loop
  1.2710 +    andi(limit_reg, tmp_reg, 4-1);            // remaining characters
  1.2711 +
  1.2712 +   // Adapt str1_reg str2_reg for the first loop iteration
  1.2713 +    mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
  1.2714 +    addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
  1.2715 +//16:
  1.2716 +   // Compare the rest of the characters
  1.2717 +   bind(Lfast_loop);
  1.2718 +    ld(chr1_reg, 0, str1_reg);
  1.2719 +    ldx(chr2_reg, str1_reg, addr_diff);
  1.2720 +    cmpd(CCR0, chr2_reg, chr1_reg);
  1.2721 +    bne(CCR0, Lslow_case); // return chr1_reg
  1.2722 +    addi(str1_reg, str1_reg, 4*2);
  1.2723 +    bdnz(Lfast_loop);
  1.2724 +    addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
  1.2725 +//23:
  1.2726 +   bind(Lslow_case);
  1.2727 +    mtctr(limit_reg);
  1.2728 +//24:
  1.2729 +   bind(Lslow_loop);
  1.2730 +    lhz(chr1_reg, 0, str1_reg);
  1.2731 +    lhzx(chr2_reg, str1_reg, addr_diff);
  1.2732 +    subf_(result_reg, chr2_reg, chr1_reg);
  1.2733 +    bne(CCR0, Ldone); // return chr1_reg
  1.2734 +    addi(str1_reg, str1_reg, 1*2);
  1.2735 +    bdnz(Lslow_loop);
  1.2736 +//30:
  1.2737 +   // If strings are equal up to min length, return the length difference.
  1.2738 +    mr(result_reg, cnt_diff);
  1.2739 +    nop(); // alignment
  1.2740 +//32:
  1.2741 +   // Otherwise, return the difference between the first mismatched chars.
  1.2742 +   bind(Ldone);
  1.2743 +}
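          +
          +// Reference semantics of string_compare (illustration only, not
          +// compiled): compare up to min(cnt1, cnt2) characters, otherwise
          +// return the length difference. The fast loop above handles 4
          +// jchars (one doubleword) per iteration:
          +#if 0
          +static int string_compare_ref(const jchar* s1, int cnt1,
          +                              const jchar* s2, int cnt2) {
          +  int limit = (cnt1 < cnt2) ? cnt1 : cnt2;
          +  for (int i = 0; i < limit; ++i) {
          +    if (s1[i] != s2[i]) return (int)s1[i] - (int)s2[i];
          +  }
          +  return cnt1 - cnt2;
          +}
          +#endif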
  1.2744 +
  1.2745 +
  1.2746 +// Compare char[] arrays.
  1.2747 +//
  1.2748 +// str1_reg   USE only
  1.2749 +// str2_reg   USE only
  1.2750 +// cnt_reg    USE_DEF, due to tmp reg shortage
  1.2751 +// result_reg DEF only, might compromise USE only registers
  1.2752 +void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
  1.2753 +                                        Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
  1.2754 +                                        Register tmp5_reg) {
  1.2755 +
  1.2756 +  // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
  1.2757 +  assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
  1.2758 +  assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
  1.2759 +
  1.2760 +  // Offset 0 should be 32 byte aligned.
  1.2761 +  Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
  1.2762 +  Register index_reg = tmp5_reg;
  1.2763 +  Register cbc_iter  = tmp4_reg;
  1.2764 +
  1.2765 +//-1:
  1.2766 +  dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  1.2767 +  dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  1.2768 +//1:
  1.2769 +  andi(cbc_iter, cnt_reg, 4-1);            // Remaining iterations after 4 java characters per iteration loop.
  1.2770 +  li(index_reg, 0); // init
  1.2771 +  li(result_reg, 0); // assume false
  1.2772 +  srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
  1.2773 +
  1.2774 +  cmpwi(CCR1, cbc_iter, 0);             // CCR1 = (cbc_iter==0)
  1.2775 +  beq(CCR0, Linit_cbc);                 // too short
  1.2776 +    mtctr(tmp2_reg);
  1.2777 +//8:
  1.2778 +    bind(Lloop);
  1.2779 +      ldx(tmp1_reg, str1_reg, index_reg);
  1.2780 +      ldx(tmp2_reg, str2_reg, index_reg);
  1.2781 +      cmpd(CCR0, tmp1_reg, tmp2_reg);
  1.2782 +      bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  1.2783 +      addi(index_reg, index_reg, 4*sizeof(jchar));
  1.2784 +      bdnz(Lloop);
  1.2785 +//14:
  1.2786 +  bind(Linit_cbc);
  1.2787 +  beq(CCR1, Ldone_true);
  1.2788 +    mtctr(cbc_iter);
  1.2789 +//16:
  1.2790 +    bind(Lcbc);
  1.2791 +      lhzx(tmp1_reg, str1_reg, index_reg);
  1.2792 +      lhzx(tmp2_reg, str2_reg, index_reg);
  1.2793 +      cmpw(CCR0, tmp1_reg, tmp2_reg);
  1.2794 +      bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  1.2795 +      addi(index_reg, index_reg, 1*sizeof(jchar));
  1.2796 +      bdnz(Lcbc);
  1.2797 +    nop();
  1.2798 +  bind(Ldone_true);
  1.2799 +  li(result_reg, 1);
  1.2800 +//24:
  1.2801 +  bind(Ldone_false);
  1.2802 +}
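          +
          +// Reference semantics of char_arrays_equals (illustration only, not
          +// compiled). The emitted code compares one 64-bit word (4 jchars)
          +// per main-loop pass and handles the cnt % 4 leftovers per halfword:
          +#if 0
          +static bool char_arrays_equals_ref(const jchar* s1, const jchar* s2, int cnt) {
          +  for (int i = 0; i < cnt; ++i) {
          +    if (s1[i] != s2[i]) return false;
          +  }
          +  return true;
          +}
          +#endif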
  1.2803 +
  1.2804 +
  1.2805 +void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
  1.2806 +                                           Register tmp1_reg, Register tmp2_reg) {
  1.2807 +  // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
  1.2808 +  assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
  1.2809 +  assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
  1.2810 +  assert(sizeof(jchar) == 2, "must be");
  1.2811 +  assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
  1.2812 +
  1.2813 +  Label Ldone_false;
  1.2814 +
  1.2815 +  if (cntval < 16) { // short case
  1.2816 +    if (cntval != 0) li(result_reg, 0); // assume false
  1.2817 +
  1.2818 +    const int num_bytes = cntval*sizeof(jchar);
  1.2819 +    int index = 0;
  1.2820 +    for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
  1.2821 +      ld(tmp1_reg, index, str1_reg);
  1.2822 +      ld(tmp2_reg, index, str2_reg);
  1.2823 +      cmpd(CCR0, tmp1_reg, tmp2_reg);
  1.2824 +      bne(CCR0, Ldone_false);
  1.2825 +    }
  1.2826 +    if (cntval & 2) {
  1.2827 +      lwz(tmp1_reg, index, str1_reg);
  1.2828 +      lwz(tmp2_reg, index, str2_reg);
  1.2829 +      cmpw(CCR0, tmp1_reg, tmp2_reg);
  1.2830 +      bne(CCR0, Ldone_false);
  1.2831 +      index += 4;
  1.2832 +    }
  1.2833 +    if (cntval & 1) {
  1.2834 +      lhz(tmp1_reg, index, str1_reg);
  1.2835 +      lhz(tmp2_reg, index, str2_reg);
  1.2836 +      cmpw(CCR0, tmp1_reg, tmp2_reg);
  1.2837 +      bne(CCR0, Ldone_false);
  1.2838 +    }
  1.2839 +    // fallthrough: true
  1.2840 +  } else {
  1.2841 +    Label Lloop;
  1.2842 +    Register index_reg = tmp1_reg;
  1.2843 +    const int loopcnt = cntval/4;
  1.2844 +    assert(loopcnt > 0, "must be");
  1.2845 +    // Offset 0 should be 32 byte aligned.
  1.2846 +    //2:
  1.2847 +    dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  1.2848 +    dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  1.2849 +    li(tmp2_reg, loopcnt);
  1.2850 +    li(index_reg, 0); // init
  1.2851 +    li(result_reg, 0); // assume false
  1.2852 +    mtctr(tmp2_reg);
  1.2853 +    //8:
  1.2854 +    bind(Lloop);
  1.2855 +    ldx(R0, str1_reg, index_reg);
  1.2856 +    ldx(tmp2_reg, str2_reg, index_reg);
  1.2857 +    cmpd(CCR0, R0, tmp2_reg);
  1.2858 +    bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  1.2859 +    addi(index_reg, index_reg, 4*sizeof(jchar));
  1.2860 +    bdnz(Lloop);
  1.2861 +    //14:
  1.2862 +    if (cntval & 2) {
  1.2863 +      lwzx(R0, str1_reg, index_reg);
  1.2864 +      lwzx(tmp2_reg, str2_reg, index_reg);
  1.2865 +      cmpw(CCR0, R0, tmp2_reg);
  1.2866 +      bne(CCR0, Ldone_false);
  1.2867 +      if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
  1.2868 +    }
  1.2869 +    if (cntval & 1) {
  1.2870 +      lhzx(R0, str1_reg, index_reg);
  1.2871 +      lhzx(tmp2_reg, str2_reg, index_reg);
  1.2872 +      cmpw(CCR0, R0, tmp2_reg);
  1.2873 +      bne(CCR0, Ldone_false);
  1.2874 +    }
   1.2875 +    // fallthrough: true
  1.2876 +  }
  1.2877 +  li(result_reg, 1);
  1.2878 +  bind(Ldone_false);
  1.2879 +}
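          +
          +// Tail schedule used above for constant cntval (a descriptive note,
          +// not normative): 8-byte compares while a full doubleword remains,
          +// then one 4-byte compare if (cntval & 2), then one 2-byte compare
          +// if (cntval & 1); any mismatch branches to Ldone_false.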
  1.2880 +
  1.2881 +
  1.2882 +void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  1.2883 +#ifdef ASSERT
  1.2884 +  Label ok;
  1.2885 +  if (check_equal) {
  1.2886 +    beq(CCR0, ok);
  1.2887 +  } else {
  1.2888 +    bne(CCR0, ok);
  1.2889 +  }
  1.2890 +  stop(msg, id);
  1.2891 +  bind(ok);
  1.2892 +#endif
  1.2893 +}
  1.2894 +
  1.2895 +void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
  1.2896 +                                          Register mem_base, const char* msg, int id) {
  1.2897 +#ifdef ASSERT
  1.2898 +  switch (size) {
  1.2899 +    case 4:
  1.2900 +      lwz(R0, mem_offset, mem_base);
  1.2901 +      cmpwi(CCR0, R0, 0);
  1.2902 +      break;
  1.2903 +    case 8:
  1.2904 +      ld(R0, mem_offset, mem_base);
  1.2905 +      cmpdi(CCR0, R0, 0);
  1.2906 +      break;
  1.2907 +    default:
  1.2908 +      ShouldNotReachHere();
  1.2909 +  }
  1.2910 +  asm_assert(check_equal, msg, id);
  1.2911 +#endif // ASSERT
  1.2912 +}
  1.2913 +
  1.2914 +void MacroAssembler::verify_thread() {
  1.2915 +  if (VerifyThread) {
  1.2916 +    unimplemented("'VerifyThread' currently not implemented on PPC");
  1.2917 +  }
  1.2918 +}
  1.2919 +
   1.2920 +// Read: oop. Kill: R0. May also kill volatile floating-point registers.
  1.2921 +void MacroAssembler::verify_oop(Register oop, const char* msg) {
  1.2922 +  if (!VerifyOops) {
  1.2923 +    return;
  1.2924 +  }
   1.2925 +  // R11 is used as tmp; its value will be preserved across the call.
  1.2926 +  Register tmp = R11;
  1.2927 +  assert(oop != tmp, "precondition");
  1.2928 +  unsigned int nbytes_save = 10*8; // 10 volatile gprs
   1.2929 +  address /* FunctionDescriptor** */ fd =
  1.2930 +    StubRoutines::verify_oop_subroutine_entry_address();
  1.2931 +  // save tmp
  1.2932 +  mr(R0, tmp);
  1.2933 +  // kill tmp
  1.2934 +  save_LR_CR(tmp);
  1.2935 +  push_frame_abi112(nbytes_save, tmp);
  1.2936 +  // restore tmp
  1.2937 +  mr(tmp, R0);
  1.2938 +  save_volatile_gprs(R1_SP, 112); // except R0
  1.2939 +  // load FunctionDescriptor**
  1.2940 +  load_const(tmp, fd);
  1.2941 +  // load FunctionDescriptor*
  1.2942 +  ld(tmp, 0, tmp);
  1.2943 +  mr(R4_ARG2, oop);
  1.2944 +  load_const(R3_ARG1, (address)msg);
  1.2945 +  // call destination for its side effect
  1.2946 +  call_c(tmp);
  1.2947 +  restore_volatile_gprs(R1_SP, 112); // except R0
  1.2948 +  pop_frame();
  1.2949 +  // save tmp
  1.2950 +  mr(R0, tmp);
  1.2951 +  // kill tmp
  1.2952 +  restore_LR_CR(tmp);
  1.2953 +  // restore tmp
  1.2954 +  mr(tmp, R0);
  1.2955 +}
  1.2956 +
  1.2957 +const char* stop_types[] = {
  1.2958 +  "stop",
  1.2959 +  "untested",
  1.2960 +  "unimplemented",
  1.2961 +  "shouldnotreachhere"
  1.2962 +};
  1.2963 +
  1.2964 +static void stop_on_request(int tp, const char* msg) {
   1.2965 +  tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp % 4 /*stop_end*/], msg);
  1.2966 +  guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
  1.2967 +}
  1.2968 +
  1.2969 +// Call a C-function that prints output.
  1.2970 +void MacroAssembler::stop(int type, const char* msg, int id) {
  1.2971 +#ifndef PRODUCT
  1.2972 +  block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
  1.2973 +#else
  1.2974 +  block_comment("stop {");
  1.2975 +#endif
  1.2976 +
  1.2977 +  // setup arguments
  1.2978 +  load_const_optimized(R3_ARG1, type);
  1.2979 +  load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
  1.2980 +  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
  1.2981 +  illtrap();
  1.2982 +  emit_int32(id);
  1.2983 +  block_comment("} stop;");
  1.2984 +}
  1.2985 +
  1.2986 +#ifndef PRODUCT
  1.2987 +// Write pattern 0x0101010101010101 in memory region [low-before, high+after].
  1.2988 +// Val, addr are temp registers.
  1.2989 +// If low == addr, addr is killed.
  1.2990 +// High is preserved.
  1.2991 +void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
  1.2992 +  if (!ZapMemory) return;
  1.2993 +
  1.2994 +  assert_different_registers(low, val);
  1.2995 +
  1.2996 +  BLOCK_COMMENT("zap memory region {");
  1.2997 +  load_const_optimized(val, 0x0101010101010101);
  1.2998 +  int size = before + after;
  1.2999 +  if (low == high && size < 5 && size > 0) {
  1.3000 +    int offset = -before*BytesPerWord;
  1.3001 +    for (int i = 0; i < size; ++i) {
  1.3002 +      std(val, offset, low);
  1.3003 +      offset += (1*BytesPerWord);
  1.3004 +    }
  1.3005 +  } else {
  1.3006 +    addi(addr, low, -before*BytesPerWord);
  1.3007 +    assert_different_registers(high, val);
  1.3008 +    if (after) addi(high, high, after * BytesPerWord);
  1.3009 +    Label loop;
  1.3010 +    bind(loop);
  1.3011 +    std(val, 0, addr);
  1.3012 +    addi(addr, addr, 8);
  1.3013 +    cmpd(CCR6, addr, high);
  1.3014 +    ble(CCR6, loop);
  1.3015 +    if (after) addi(high, high, -after * BytesPerWord);  // Correct back to old value.
  1.3016 +  }
  1.3017 +  BLOCK_COMMENT("} zap memory region");
  1.3018 +}
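          +
          +// Reference semantics of zap_from_to (illustration only, not
          +// compiled): one 8-byte store of the pattern per word in the region
          +// [low - before*BytesPerWord, high + after*BytesPerWord]:
          +#if 0
          +static void zap_ref(intptr_t* lo, intptr_t* hi) {
          +  for (intptr_t* p = lo; p <= hi; ++p) *p = (intptr_t)0x0101010101010101ULL;
          +}
          +#endif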
  1.3019 +
  1.3020 +#endif // !PRODUCT
