src/cpu/ppc/vm/macroAssembler_ppc.cpp

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
  if (Assembler::is_simm(si31, 16)) {
    ld(d, si31, a);
    if (emit_filler_nop) nop();
  } else {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    addis(d, a, hi);
    ld(d, lo, d);
  }
}

void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
  assert_different_registers(d, a);
  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
}

void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case  8:              ld(dst, offs, base);                         break;
  case  4:  is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
  case  2:  is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
  case  1:  lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
  default:  ShouldNotReachHere();
  }
}
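
// Illustrative usage sketch (not from the original sources; the register and
// offset choices are hypothetical):
//   load_sized_value(R3, offs, R4, 2, true);  // emits lha R3, offs(R4)
//   load_sized_value(R3, offs, R4, 1, true);  // emits lbz + extsb (no lba)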

void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                       size_t size_in_bytes) {
  switch (size_in_bytes) {
  case  8:  std(dst, offs, base); break;
  case  4:  stw(dst, offs, base); break;
  case  2:  sth(dst, offs, base); break;
  case  1:  stb(dst, offs, base); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::align(int modulus, int max, int rem) {
  int padding = (rem + modulus - (offset() % modulus)) % modulus;
  if (padding > max) return;
  for (int c = (padding >> 2); c > 0; --c) { nop(); }
}
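
// Worked example (illustration only): with rem == 0 and offset() % 16 == 4,
// align(16) computes padding = 12 and emits three nops; if the required
// padding exceeded `max', nothing would be emitted.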

// Issue instructions that calculate given TOC from global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
                                                       bool add_relocation, bool emit_dummy_addr) {
  int offset = -1;
  if (emit_dummy_addr) {
    offset = -128; // dummy address
  } else if (addr != (address)(intptr_t)-1) {
    offset = MacroAssembler::offset_to_global_toc(addr);
  }

  if (hi16) {
    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
  }
  if (lo16) {
    if (add_relocation) {
      // Relocate at the addi to avoid confusion with a load from the method's TOC.
      relocate(internal_word_Relocation::spec(addr));
    }
    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
  }
}
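
// Sketch of the full sequence emitted above (hi16 and lo16 both set), where
// offset = addr - global_toc() and R29 holds the global TOC:
//   addis dst, R29, largeoffset_si16_si16_hi(offset)
//   addi  dst, dst, largeoffset_si16_si16_lo(offset)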

int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
  const int offset = MacroAssembler::offset_to_global_toc(addr);

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
}

address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");

  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
  // -1 is a special case.
  if (offset == -1) {
    return (address)(intptr_t)-1;
  } else {
    return global_toc() + offset;
  }
}

#ifdef _LP64
// Patch compressed oops or klass constants.
// Assembler sequence is
// 1) compressed oops:
//    lis  rx = const.hi
//    ori rx = rx | const.lo
// 2) compressed klass:
//    lis  rx = const.hi
//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
//    ori rx = rx | const.lo
// The optional clrldi is simply skipped over when patching.
int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;
  while (inst1_addr >= bound) {
    inst1 = *(int *)inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  int xc = (data >> 16) & 0xffff;
  int xd = (data >>  0) & 0xffff;

  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
  set_imm((int *)inst2_addr,        (xd)); // unsigned int
  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
}

// Get compressed oop or klass constant.
narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;

  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);

  return (int) (xl | xh);
}
#endif // _LP64

void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
  int toc_offset = 0;
  // Use RelocationHolder::none for the constant pool entry, otherwise
  // we will end up with a failing NativeCall::verify(x) where x is
  // the address of the constant pool entry.
  // FIXME: We should insert relocation information for oops at the constant
  // pool entries instead of inserting it at the loads; patching of a constant
  // pool entry should be less expensive.
  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
  // Relocate at the pc of the load.
  relocate(a.rspec());
  toc_offset = (int)(oop_address - code()->consts()->start());
  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
}

bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  // The relocation points to the ld or the addis.
  return (is_ld(inst1)) ||
         (is_addis(inst1) && inv_ra_field(inst1) != 0);
}

int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");

  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else if (is_addis(inst1)) {
    const int dst = inv_rt_field(inst1);

    // Now, find the succeeding ld which reads and writes to dst.
    address inst2_addr = inst1_addr + BytesPerInstWord;
    int inst2 = 0;
    while (true) {
      inst2 = *(int *) inst2_addr;
      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
        // Stop, found the ld which reads and writes dst.
        break;
      }
      inst2_addr += BytesPerInstWord;
    }
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
  ShouldNotReachHere();
  return 0;
}

// Get the constant from a `load_const' sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const_at(a), "not a load of a constant");
  const int *p = (const int*) a;
  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
  if (is_ori(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
  } else if (is_lis(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
  } else {
    ShouldNotReachHere();
    return (long) 0;
  }
  return (long) x;
}
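
// Documentation sketch of the two `load_const' shapes decoded above, stated
// in terms of which instruction word contributes which 16-bit slice:
//   2nd word is ori:  bits 63..48 from word 0, 47..32 from word 1,
//                     31..16 from word 3, 15..0 from word 4 (word 2 shifts).
//   2nd word is lis:  bits 63..48 from word 0, 47..32 from word 2,
//                     31..16 from word 1, 15..0 from word 3.
// patch_const() below writes the immediates back using the same layout.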

// Patch the 64 bit constant of a `load_const' sequence. This is a low
// level procedure. It neither flushes the instruction cache nor is it
// MT-safe.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const_at(a), "not a load of a constant");
  int *p = (int*) a;
  if (is_ori(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(1 + p, (x >> 32) & 0xffff);
    set_imm(3 + p, (x >> 16) & 0xffff);
    set_imm(4 + p, x & 0xffff);
  } else if (is_lis(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(2 + p, (x >> 32) & 0xffff);
    set_imm(1 + p, (x >> 16) & 0xffff);
    set_imm(3 + p, x & 0xffff);
  } else {
    ShouldNotReachHere();
  }
}

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp, int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  // Load indirectly to solve generation ordering problem.
  // Static address, no relocation.
  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)

  if (offset != 0) {
    addi(tmp, tmp, offset);
  }

  return RegisterOrConstant(tmp);
}

#ifndef PRODUCT
void MacroAssembler::pd_print_patched_instruction(address branch) {
  Unimplemented(); // TODO: PPC port
}
#endif // ndef PRODUCT

// Conditional far branch for destinations encodable in 24+2 bits.
void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {

  // If requested by flag optimize, relocate the bc_far as a
  // runtime_call and prepare for optimizing it when the code gets
  // relocated.
  if (optimize == bc_far_optimize_on_relocate) {
    relocate(relocInfo::runtime_call_type);
  }

  // variant 2:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //

  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                opposite_bcond(inv_boint_bcond(boint)));

  // We emit two branches.
  // First, a conditional branch which jumps around the far branch.
  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
  const address bc_pc        = pc();
  bc(opposite_boint, biint, not_taken_pc);

  const int bc_instr = *(int*)bc_pc;
  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
         "postcondition");
  assert(biint == inv_bi_field(bc_instr), "postcondition");

  // Second, an unconditional far branch which jumps to dest.
  // Note: target(dest) remembers the current pc (see CodeSection::target)
  //       and returns the current pc if the label is not bound yet; when
  //       the label gets bound, the unconditional far branch will be patched.
  const address target_pc = target(dest);
  const address b_pc  = pc();
  b(target_pc);

  assert(not_taken_pc == pc(),                 "postcondition");
  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
}

bool MacroAssembler::is_bc_far_at(address instruction_addr) {
  return is_bc_far_variant1_at(instruction_addr) ||
         is_bc_far_variant2_at(instruction_addr) ||
         is_bc_far_variant3_at(instruction_addr);
}

address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
  if (is_bc_far_variant1_at(instruction_addr)) {
    const address instruction_1_addr = instruction_addr;
    const int instruction_1 = *(int*)instruction_1_addr;
    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
  } else if (is_bc_far_variant2_at(instruction_addr)) {
    const address instruction_2_addr = instruction_addr + 4;
    return bxx_destination(instruction_2_addr);
  } else if (is_bc_far_variant3_at(instruction_addr)) {
    return instruction_addr + 8;
  }
  // variant 4 ???
  ShouldNotReachHere();
  return NULL;
}

void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {

  if (is_bc_far_variant3_at(instruction_addr)) {
    // variant 3, far cond branch to the next instruction, already patched to nops:
    //
    //    nop
    //    endgroup
    //  SKIP/DEST:
    //
    return;
  }

  // First, extract boint and biint from the current branch.
  int boint = 0;
  int biint = 0;

  ResourceMark rm;
  const int code_size = 2 * BytesPerInstWord;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
    masm.nop();
    masm.endgroup();
  } else {
    if (is_bc_far_variant1_at(instruction_addr)) {
      // variant 1, the 1st instruction contains the destination address:
      //
      //    bcxx  DEST
      //    endgroup
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = inv_bo_field(instruction_1);
      biint = inv_bi_field(instruction_1);
    } else if (is_bc_far_variant2_at(instruction_addr)) {
      // variant 2, the 2nd instruction contains the destination address:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
          opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
      biint = inv_bi_field(instruction_1);
    } else {
      // variant 4???
      ShouldNotReachHere();
    }

    // Second, set the new branch destination and optimize the code.
    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
      // variant 1:
      //
      //    bcxx  DEST
      //    endgroup
      //
      masm.bc(boint, biint, dest);
      masm.endgroup();
    } else {
      // variant 2:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                    opposite_bcond(inv_boint_bcond(boint)));
      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
      masm.bc(opposite_boint, biint, not_taken_pc);
      masm.b(dest);
    }
  }
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Emit a NOT MT-safe patchable 64 bit absolute call/jump.
void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
  // Get current pc.
  uint64_t start_pc = (uint64_t) pc();

  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first

  // Relocate here.
  if (rt != relocInfo::none) {
    relocate(rt);
  }

  if ( ReoptimizeCallSequences &&
       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
        (!link && is_within_range_of_b(dest, pc_of_b)))) {
    // variant 2:
    // Emit an optimized, pc-relative call/jump.

    if (link) {
      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();

      // do the call
      assert(pc() == pc_of_bl, "just checking");
      bl(dest, relocInfo::none);
    } else {
      // do the jump
      assert(pc() == pc_of_b, "just checking");
      b(dest, relocInfo::none);

      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();
    }

    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
           "can't identify emitted call");
  } else {
    // variant 1:
#if defined(ABI_ELFv2)
    nop();
    calculate_address_from_global_toc(R12, dest, true, true, false);
    mtctr(R12);
    nop();
    nop();
#else
    mr(R0, R11);  // spill R11 -> R0.

    // Load the destination address into CTR,
    // calculate destination relative to global toc.
    calculate_address_from_global_toc(R11, dest, true, true, false);

    mtctr(R11);
    mr(R11, R0);  // spill R11 <- R0.
    nop();
#endif

    // Do the call/jump.
    if (link) {
      bctrl();
    } else {
      bctr();
    }
    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
           "can't identify emitted call");
  }

  // Assert that we can identify the emitted call/jump.
  assert(is_bxx64_patchable_at((address)start_pc, link),
         "can't identify emitted call");
  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
         "wrong encoding of dest address");
}
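
// Layout sketch of the 7-word patchable site emitted above; the
// identification routines below depend on exactly this shape:
//   variant 2 (link):     nop x6; bl dest            (pc-relative, bl last)
//   variant 2 (no link):  b dest; nop x6             (b first)
//   variant 1b:           addis/addi relative to the global TOC in words
//                         1-2, mtctr in word 3, bctr[l] as the last word,
//                         spill/padding in the remaining slots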

// Identify a bxx64_patchable instruction.
bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
      || is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Does the call64_patchable instruction use a pc-relative encoding of
// the call destination?
bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
  // variant 2 is pc-relative
  return is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Identify variant 1.
bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[5]) // mtctr
      && is_load_const_at(instruction_addr);
}

// Identify variant 1b: load destination relative to global toc.
bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[3]) // mtctr
      && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
}

// Identify variant 2.
bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  if (link) {
    return is_bl (instr[6])  // bl dest is last
        && is_nop(instr[0])  // nop
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5]); // nop
  } else {
    return is_b  (instr[0])  // b  dest is first
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5])  // nop
        && is_nop(instr[6]); // nop
  }
}

// Set dest address of a bxx64_patchable instruction.
void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
  ResourceMark rm;
  int code_size = MacroAssembler::bxx64_patchable_size;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.bxx64_patchable(dest, relocInfo::none, link);
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Get dest address of a bxx64_patchable instruction.
address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
    return (address) (unsigned long) get_const(instruction_addr);
  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
    unsigned int* instr = (unsigned int*) instruction_addr;
    if (link) {
      const int instr_idx = 6; // bl is last
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    } else {
      const int instr_idx = 0; // b is first
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    }
  // Load dest relative to global toc.
  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
                                                               instruction_addr);
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// Uses ordering which corresponds to ABI:
//    _savegpr0_14:  std  r14,-144(r1)
//    _savegpr0_15:  std  r15,-136(r1)
//    _savegpr0_16:  std  r16,-128(r1)
void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
  std(R14, offset, dst);   offset += 8;
  std(R15, offset, dst);   offset += 8;
  std(R16, offset, dst);   offset += 8;
  std(R17, offset, dst);   offset += 8;
  std(R18, offset, dst);   offset += 8;
  std(R19, offset, dst);   offset += 8;
  std(R20, offset, dst);   offset += 8;
  std(R21, offset, dst);   offset += 8;
  std(R22, offset, dst);   offset += 8;
  std(R23, offset, dst);   offset += 8;
  std(R24, offset, dst);   offset += 8;
  std(R25, offset, dst);   offset += 8;
  std(R26, offset, dst);   offset += 8;
  std(R27, offset, dst);   offset += 8;
  std(R28, offset, dst);   offset += 8;
  std(R29, offset, dst);   offset += 8;
  std(R30, offset, dst);   offset += 8;
  std(R31, offset, dst);   offset += 8;

  stfd(F14, offset, dst);   offset += 8;
  stfd(F15, offset, dst);   offset += 8;
  stfd(F16, offset, dst);   offset += 8;
  stfd(F17, offset, dst);   offset += 8;
  stfd(F18, offset, dst);   offset += 8;
  stfd(F19, offset, dst);   offset += 8;
  stfd(F20, offset, dst);   offset += 8;
  stfd(F21, offset, dst);   offset += 8;
  stfd(F22, offset, dst);   offset += 8;
  stfd(F23, offset, dst);   offset += 8;
  stfd(F24, offset, dst);   offset += 8;
  stfd(F25, offset, dst);   offset += 8;
  stfd(F26, offset, dst);   offset += 8;
  stfd(F27, offset, dst);   offset += 8;
  stfd(F28, offset, dst);   offset += 8;
  stfd(F29, offset, dst);   offset += 8;
  stfd(F30, offset, dst);   offset += 8;
  stfd(F31, offset, dst);
}

// Uses ordering which corresponds to ABI:
//    _restgpr0_14:  ld   r14,-144(r1)
//    _restgpr0_15:  ld   r15,-136(r1)
//    _restgpr0_16:  ld   r16,-128(r1)
void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
  ld(R14, offset, src);   offset += 8;
  ld(R15, offset, src);   offset += 8;
  ld(R16, offset, src);   offset += 8;
  ld(R17, offset, src);   offset += 8;
  ld(R18, offset, src);   offset += 8;
  ld(R19, offset, src);   offset += 8;
  ld(R20, offset, src);   offset += 8;
  ld(R21, offset, src);   offset += 8;
  ld(R22, offset, src);   offset += 8;
  ld(R23, offset, src);   offset += 8;
  ld(R24, offset, src);   offset += 8;
  ld(R25, offset, src);   offset += 8;
  ld(R26, offset, src);   offset += 8;
  ld(R27, offset, src);   offset += 8;
  ld(R28, offset, src);   offset += 8;
  ld(R29, offset, src);   offset += 8;
  ld(R30, offset, src);   offset += 8;
  ld(R31, offset, src);   offset += 8;

  // FP registers
  lfd(F14, offset, src);   offset += 8;
  lfd(F15, offset, src);   offset += 8;
  lfd(F16, offset, src);   offset += 8;
  lfd(F17, offset, src);   offset += 8;
  lfd(F18, offset, src);   offset += 8;
  lfd(F19, offset, src);   offset += 8;
  lfd(F20, offset, src);   offset += 8;
  lfd(F21, offset, src);   offset += 8;
  lfd(F22, offset, src);   offset += 8;
  lfd(F23, offset, src);   offset += 8;
  lfd(F24, offset, src);   offset += 8;
  lfd(F25, offset, src);   offset += 8;
  lfd(F26, offset, src);   offset += 8;
  lfd(F27, offset, src);   offset += 8;
  lfd(F28, offset, src);   offset += 8;
  lfd(F29, offset, src);   offset += 8;
  lfd(F30, offset, src);   offset += 8;
  lfd(F31, offset, src);
}

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
  std(R2,  offset, dst);   offset += 8;
  std(R3,  offset, dst);   offset += 8;
  std(R4,  offset, dst);   offset += 8;
  std(R5,  offset, dst);   offset += 8;
  std(R6,  offset, dst);   offset += 8;
  std(R7,  offset, dst);   offset += 8;
  std(R8,  offset, dst);   offset += 8;
  std(R9,  offset, dst);   offset += 8;
  std(R10, offset, dst);   offset += 8;
  std(R11, offset, dst);   offset += 8;
  std(R12, offset, dst);
}

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
  ld(R2,  offset, src);   offset += 8;
  ld(R3,  offset, src);   offset += 8;
  ld(R4,  offset, src);   offset += 8;
  ld(R5,  offset, src);   offset += 8;
  ld(R6,  offset, src);   offset += 8;
  ld(R7,  offset, src);   offset += 8;
  ld(R8,  offset, src);   offset += 8;
  ld(R9,  offset, src);   offset += 8;
  ld(R10, offset, src);   offset += 8;
  ld(R11, offset, src);   offset += 8;
  ld(R12, offset, src);
}

void MacroAssembler::save_LR_CR(Register tmp) {
  mfcr(tmp);
  std(tmp, _abi(cr), R1_SP);
  mflr(tmp);
  std(tmp, _abi(lr), R1_SP);
  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
}

void MacroAssembler::restore_LR_CR(Register tmp) {
  assert(tmp != R1_SP, "must be distinct");
  ld(tmp, _abi(lr), R1_SP);
  mtlr(tmp);
  ld(tmp, _abi(cr), R1_SP);
  mtcr(tmp);
}

address MacroAssembler::get_PC_trash_LR(Register result) {
  Label L;
  bl(L);
  bind(L);
  address lr_pc = pc();
  mflr(result);
  return lr_pc;
}
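
// The sequence above is the common PowerPC idiom for reading the current pc:
// `bl' to the immediately following instruction deposits that instruction's
// address in LR, and mflr copies it into `result'. LR is clobbered, as the
// name warns.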

void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
  assert_different_registers(offset, tmp, R1_SP);
  andi_(tmp, offset, frame::alignment_in_bytes-1);
  asm_assert_eq("resize_frame: unaligned", 0x204);
#endif

  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdux(tmp, R1_SP, offset);
}

void MacroAssembler::resize_frame(int offset, Register tmp) {
  assert(is_simm(offset, 16), "too big an offset");
  assert_different_registers(tmp, R1_SP);
  assert((offset & (frame::alignment_in_bytes-1)) == 0, "resize_frame: unaligned");
  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdu(tmp, offset, R1_SP);
}

void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
  // (addr == tmp1) || (addr == tmp2) is allowed here!
  assert(tmp1 != tmp2, "must be distinct");

  // Compute offset w.r.t. current stack pointer.
  // tmp_1 <- addr - SP (!)
  subf(tmp1, R1_SP, addr);

  // Atomically update SP keeping back link.
  resize_frame(tmp1 /* offset */, tmp2 /* tmp */);
}

void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
  assert(bytes != R0, "r0 not allowed here");
  andi_(R0, bytes, frame::alignment_in_bytes-1);
  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
#endif
  neg(tmp, bytes);
  stdux(R1_SP, R1_SP, tmp);
}

// Push a frame of size `bytes'.
void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
  long offset = align_addr(bytes, frame::alignment_in_bytes);
  if (is_simm(-offset, 16)) {
    stdu(R1_SP, -offset, R1_SP);
  } else {
    load_const(tmp, -offset);
    stdux(R1_SP, R1_SP, tmp);
  }
}
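
// Note on the frames pushed above: stdu/stdux write the old SP to the new
// stack top and update SP in one instruction, e.g. (sketch)
//   stdu R1_SP, -offset(R1_SP)  // *(SP - offset) = SP; SP -= offset
// so the back link required by the PPC64 ABI is established atomically with
// the resize.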

// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
                                                      Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  ld(R1_SP, _abi(callers_sp), R1_SP);
}

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
  // TODO(asmundak): make sure the caller uses R12 as function descriptor
  // most of the time.
  if (R12 != r_function_entry) {
    mr(R12, r_function_entry);
  }
  mtctr(R12);
  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
  load_const(R12, function_entry, R0);
  return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
  // We emit standard ptrgl glue code here.
  assert((function_descriptor != R0), "function_descriptor cannot be R0");

  // Retrieve necessary entries from the function descriptor.
  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
  mtctr(R0);

  if (load_toc_of_callee) {
    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
  }
  if (load_env_of_callee) {
    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
  } else if (load_toc_of_callee) {
    li(R11, 0);
  }

  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C calling
// conventions.
// We don't use the TOC in generated code, so there is no need to save
// and restore its value.
address MacroAssembler::call_c(Register fd) {
  return branch_to(fd, /*and_link=*/true,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c_and_return_to_caller(Register fd) {
  return branch_to(fd, /*and_link=*/false,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
  if (rt != relocInfo::none) {
    // This call needs to be relocatable.
    if (!ReoptimizeCallSequences
        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
        || fd == NULL   // support code-size estimation
        || !fd->is_friend_function()
        || fd->entry() == NULL) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);

      bool has_env = (fd != NULL && fd->env() != NULL);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/has_env);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env. Use an optimizable call instruction, but ensure the
      // same code-size as in the case of a non-friend function.
      nop();
      nop();
      nop();
      bl64_patchable(fd->entry(), rt);
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  } else {
    // This call does not need to be relocatable, do more aggressive
    // optimizations.
    if (!ReoptimizeCallSequences
        || !fd->is_friend_function()) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/true);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env.
      address dest = fd->entry();
      if (is_within_range_of_b(dest, pc())) {
        bl(dest);
      } else {
        bl64_patchable(dest, rt);
      }
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  }
}

// Call a C function. All constants needed reside in TOC.
//
// Read the address to call from the TOC.
// Read env from TOC, if fd specifies an env.
// Read new TOC from TOC.
address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
                                         relocInfo::relocType rt, Register toc) {
  if (!ReoptimizeCallSequences
      || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
      || !fd->is_friend_function()) {
    // It's not a friend function as defined by class FunctionDescriptor,
    // so do a full call-c here.
    assert(fd->entry() != NULL, "function must be linked");

    AddressLiteral fd_entry(fd->entry());
    load_const_from_method_toc(R11, fd_entry, toc);
    mtctr(R11);
    if (fd->env() == NULL) {
      li(R11, 0);
      nop();
    } else {
      AddressLiteral fd_env(fd->env());
      load_const_from_method_toc(R11, fd_env, toc);
    }
    AddressLiteral fd_toc(fd->toc());
    load_toc_from_toc(R2_TOC, fd_toc, toc);
    // R2_TOC is killed.
    bctrl();
    _last_calls_return_pc = pc();
  } else {
    // It's a friend function, load the entry point and don't care about
    // toc and env. Use an optimizable call instruction, but ensure the
    // same code-size as in the case of a non-friend function.
    nop();
    bl64_patchable(fd->entry(), rt);
    _last_calls_return_pc = pc();
  }
  return _last_calls_return_pc;
}
#endif // ABI_ELFv2

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     check_exceptions) {
  BLOCK_COMMENT("call_VM {");
  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = R1_SP;
  }
  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  address return_pc = call_c(entry_point, relocInfo::none);
#else
  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif

  reset_last_Java_frame();

  // Check for pending exceptions.
  if (check_exceptions) {
    // We don't check for exceptions here.
    ShouldNotReachHere();
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc;
  BLOCK_COMMENT("} call_VM");
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
  call_c(entry_point, relocInfo::none);
#else
  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
  BLOCK_COMMENT("} call_VM_leaf");
}
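
// Argument shuffling convention for the overloads below: call_VM reserves
// R3_ARG1 for the thread, so Java arguments move to R4_ARG2 and up, while
// call_VM_leaf starts at R3_ARG1. Illustrative (hypothetical registers):
//   call_VM(noreg, entry, Ra, Rb);  // Ra -> R4_ARG2, Rb -> R5_ARG3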

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  mr_if_needed(R6_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
  call_VM_leaf_base(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  mr_if_needed(R3_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
                                               address* polling_address_ptr) {
  if (!is_ld(instruction))
    return false; // It's not a ld. Fail.

  int rt = inv_rt_field(instruction);
  int ra = inv_ra_field(instruction);
  int ds = inv_ds_field(instruction);
  if (!(ds == 0 && ra != 0 && rt == 0)) {
    return false; // It's not a ld(r0, X, ra). Fail.
  }

  if (!ucontext) {
    // Set polling address.
    if (polling_address_ptr != NULL) {
      *polling_address_ptr = NULL;
    }
    return true; // No ucontext given. Can't check value of ra. Assume true.
  }

#ifdef LINUX
  // Ucontext given. Check that register ra contains the address of
  // the safepoint polling page.
  ucontext_t* uc = (ucontext_t*) ucontext;
  // Set polling address.
  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  if (polling_address_ptr != NULL) {
    *polling_address_ptr = addr;
  }
  return os::is_poll_address(addr);
#else
  // Not on Linux, ucontext must be NULL.
  ShouldNotReachHere();
  return false;
#endif
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;

  if (is_stwx(instruction) || is_stwux(instruction)) {
    int ra = inv_ra_field(instruction);
    int rb = inv_rb_field(instruction);

    // Look up the content of ra and rb in the ucontext.
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    long    rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return os::is_memory_serialize_page(thread, ra_val + rb_val);
  } else if (is_stw(instruction) || is_stwu(instruction)) {
    int ra = inv_ra_field(instruction);
    int d1 = inv_d1_field(instruction);

    // Look up the content of ra in the ucontext.
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    return os::is_memory_serialize_page(thread, ra_val + d1);
  } else {
    return false;
  }
#else
  // Workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return false;
#endif
}

void MacroAssembler::bang_stack_with_offset(int offset) {
  // When increasing the stack, the old stack pointer will be written
  // to the new top of stack according to the PPC64 ABI.
  // Therefore, stack banging is not necessary when increasing
  // the stack by <= os::vm_page_size() bytes.
  // When increasing the stack by a larger amount, this method is
  // called repeatedly to bang the intermediate pages.

  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");

  long stdoffset = -offset;

  if (is_simm(stdoffset, 16)) {
    // Signed 16 bit offset, a simple std is ok.
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, (int)(signed short)stdoffset, R1_SP);
    } else {
      std(R0, (int)(signed short)stdoffset, R1_SP);
    }
  } else if (is_simm(stdoffset, 31)) {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

    Register tmp = R11;
    addis(tmp, R1_SP, hi);
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0,  lo, tmp);
    } else {
      std(R0, lo, tmp);
    }
  } else {
    ShouldNotReachHere();
  }
}
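
// Illustrative emission for a large offset (simm31 but not simm16), with
// UseLoadInstructionsForStackBangingPPC64 off:
//   addis R11, R1_SP, largeoffset_si16_si16_hi(-offset)
//   std   R0, largeoffset_si16_si16_lo(-offset)(R11)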

// If the instruction is a stack bang of the form
//    std    R0,    x(Ry)        (see bang_stack_with_offset())
//    stdu   R1_SP, x(R1_SP)     (see push_frame(), resize_frame())
// or stdux  R1_SP, Rx, R1_SP    (see push_frame(), resize_frame())
// return the banged address. Otherwise, return 0.
address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;
  int rs = inv_rs_field(instruction);
  int ra = inv_ra_field(instruction);
  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
      || (is_stdu(instruction) && rs == 1)) {
    int ds = inv_ds_field(instruction);
    // return banged address
    return ds + (address)uc->uc_mcontext.regs->gpr[ra];
  } else if (is_stdux(instruction) && rs == 1) {
    int rb = inv_rb_field(instruction);
    address sp = (address)uc->uc_mcontext.regs->gpr[1];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
                                  : sp + rb_val; // banged address
  }
  return NULL; // not a stack bang
#else
  // Workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return NULL;
#endif
}
  1361 // CmpxchgX sets condition register to cmpX(current, compare).
  1362 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
  1363                               Register compare_value, Register exchange_value,
  1364                               Register addr_base, int semantics, bool cmpxchgx_hint,
  1365                               Register int_flag_success, bool contention_hint) {
  1366   Label retry;
  1367   Label failed;
  1368   Label done;
  1370   // Save one branch if result is returned via register and
  1371   // result register is different from the other ones.
  1372   bool use_result_reg    = (int_flag_success != noreg);
  1373   bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
  1374                             int_flag_success != exchange_value && int_flag_success != addr_base);
  1376   // release/fence semantics
  1377   if (semantics & MemBarRel) {
  1378     release();
  1379   }
  1381   if (use_result_reg && preset_result_reg) {
  1382     li(int_flag_success, 0); // preset (assume cas failed)
  1383   }
  1385   // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  1386   if (contention_hint) { // Don't try to reserve if cmp fails.
  1387     lwz(dest_current_value, 0, addr_base);
  1388     cmpw(flag, dest_current_value, compare_value);
  1389     bne(flag, failed);
  1390   }
  1392   // atomic emulation loop
  1393   bind(retry);
  1395   lwarx(dest_current_value, addr_base, cmpxchgx_hint);
  1396   cmpw(flag, dest_current_value, compare_value);
  1397   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1398     bne_predict_not_taken(flag, failed);
  1399   } else {
  1400     bne(                  flag, failed);
  1401   }
  1402   // branch to done  => (flag == ne), (dest_current_value != compare_value)
  1403   // fall through    => (flag == eq), (dest_current_value == compare_value)
  1405   stwcx_(exchange_value, addr_base);
  1406   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1407     bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  1408   } else {
  1409     bne(                  CCR0, retry); // StXcx_ sets CCR0.
  1410   }
  1411   // fall through    => (flag == eq), (dest_current_value == compare_value), (swapped)
  1413   // Result in register (must do this at the end because int_flag_success can be the
  1414   // same register as one above).
  1415   if (use_result_reg) {
  1416     li(int_flag_success, 1);
  1417   }
  1419   if (semantics & MemBarFenceAfter) {
  1420     fence();
  1421   } else if (semantics & MemBarAcq) {
  1422     isync();
  1423   }
  1425   if (use_result_reg && !preset_result_reg) {
  1426     b(done);
  1427   }
  1429   bind(failed);
  1430   if (use_result_reg && !preset_result_reg) {
  1431     li(int_flag_success, 0);
  1432   }
  1434   bind(done);
  1435   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  1436   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  1437 }
  1439 // Performs an atomic compare-and-exchange:
  1440 //   if (compare_value == *addr_base)
  1441 //     *addr_base = exchange_value
  1442 //     int_flag_success = 1;
  1443 //   else
  1444 //     int_flag_success = 0;
  1445 //
  1446 // ConditionRegister flag       = cmp(compare_value, *addr_base)
  1447 // Register dest_current_value  = *addr_base
  1448 // Register compare_value       Used to compare with value in memory
  1449 // Register exchange_value      Written to memory if compare_value == *addr_base
  1450 // Register addr_base           The memory location to compareXChange
  1451 // Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
  1452 //
  1453 // To avoid the costly compare-and-exchange, the value is tested beforehand.
  1454 // Several special cases are handled to avoid emitting superfluous instructions.
  1455 //
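// Example invocation (a sketch; register choices and semantics are made up):
//   cmpxchgd(CCR0, /*current=*/R7, /*compare=*/R8, /*exchange=*/R9,
//            /*addr_base=*/R10, MemBarRel | MemBarAcq,
//            cmpxchgx_hint_atomic_update(), R3);
//   // Afterwards R3 == 1 iff the exchange happened; CCR0 is eq on success.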
  1456 void MacroAssembler::cmpxchgd(ConditionRegister flag,
  1457                               Register dest_current_value, Register compare_value, Register exchange_value,
  1458                               Register addr_base, int semantics, bool cmpxchgx_hint,
  1459                               Register int_flag_success, Label* failed_ext, bool contention_hint) {
  1460   Label retry;
  1461   Label failed_int;
  1462   Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
  1463   Label done;
  1465   // Save one branch if result is returned via register and result register is different from the other ones.
  1466   bool use_result_reg    = (int_flag_success!=noreg);
  1467   bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value &&
  1468                             int_flag_success!=exchange_value && int_flag_success!=addr_base);
  1469   assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
  1471   // release/fence semantics
  1472   if (semantics & MemBarRel) {
  1473     release();
  1474   }
  1476   if (use_result_reg && preset_result_reg) {
  1477     li(int_flag_success, 0); // preset (assume cas failed)
  1478   }
  1480   // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  1481   if (contention_hint) { // Don't try to reserve if cmp fails.
  1482     ld(dest_current_value, 0, addr_base);
  1483     cmpd(flag, dest_current_value, compare_value);
  1484     bne(flag, failed);
  1485   }
  1487   // atomic emulation loop
  1488   bind(retry);
  1490   ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  1491   cmpd(flag, dest_current_value, compare_value);
  1492   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1493     bne_predict_not_taken(flag, failed);
  1494   } else {
  1495     bne(                  flag, failed);
  1496   }
  1498   stdcx_(exchange_value, addr_base);
  1499   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
  1500     bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
  1501   } else {
  1502     bne(                  CCR0, retry); // stXcx_ sets CCR0
  1503   }
  1505   // result in register (must do this at the end because int_flag_success can be the same register as one above)
  1506   if (use_result_reg) {
  1507     li(int_flag_success, 1);
  1508   }
  1510   // POWER6 doesn't need isync in CAS.
  1511   // Always emit isync to be on the safe side.
  1512   if (semantics & MemBarFenceAfter) {
  1513     fence();
  1514   } else if (semantics & MemBarAcq) {
  1515     isync();
  1516   }
  1518   if (use_result_reg && !preset_result_reg) {
  1519     b(done);
  1520   }
  1522   bind(failed_int);
  1523   if (use_result_reg && !preset_result_reg) {
  1524     li(int_flag_success, 0);
  1525   }
  1527   bind(done);
  1528   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  1529   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
  1530 }
  1532 // Look up the method for a megamorphic invokeinterface call.
  1533 // The target method is determined by <intf_klass, itable_index>.
  1534 // The receiver klass is in recv_klass.
  1535 // On success, the result will be in method_result, and execution falls through.
  1536 // On failure, execution transfers to the given label.
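// Layout being scanned (sketch): the itableOffsetEntry array starts right
// after the vtable and is terminated by a NULL interface:
//   recv_klass + vtable_start + vtable_len*wordSize -> { interface, offset }*
// On a match, 'offset' locates that interface's itableMethodEntry array,
// whose itable_index'th slot supplies method_result.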
  1537 void MacroAssembler::lookup_interface_method(Register recv_klass,
  1538                                              Register intf_klass,
  1539                                              RegisterOrConstant itable_index,
  1540                                              Register method_result,
  1541                                              Register scan_temp,
  1542                                              Register sethi_temp,
  1543                                              Label& L_no_such_interface) {
  1544   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  1545   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
  1546          "caller must use same register for non-constant itable index as for method");
  1548   // Compute start of first itableOffsetEntry (which is at the end of the vtable).
  1549   int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  1550   int itentry_off = itableMethodEntry::method_offset_in_bytes();
  1551   int logMEsize   = exact_log2(itableMethodEntry::size() * wordSize);
  1552   int scan_step   = itableOffsetEntry::size() * wordSize;
  1553   int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
  1555   lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
  1556   // %%% We should store the aligned, prescaled offset in the klassoop.
  1557   // Then the next several instructions would fold away.
  1559   sldi(scan_temp, scan_temp, log_vte_size);
  1560   addi(scan_temp, scan_temp, vtable_base);
  1561   add(scan_temp, recv_klass, scan_temp);
  1563   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  1564   if (itable_index.is_register()) {
  1565     Register itable_offset = itable_index.as_register();
  1566     sldi(itable_offset, itable_offset, logMEsize);
  1567     if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
  1568     add(recv_klass, itable_offset, recv_klass);
  1569   } else {
  1570     long itable_offset = (long)itable_index.as_constant();
  1571     load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
  1572     add(recv_klass, sethi_temp, recv_klass);
  1573   }
  1575   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  1576   //   if (scan->interface() == intf) {
  1577   //     result = (klass + scan->offset() + itable_index);
  1578   //   }
  1579   // }
  1580   Label search, found_method;
  1582   for (int peel = 1; peel >= 0; peel--) {
  1583     // %%%% Could load both offset and interface in one ldx, if they were
  1584     // in the opposite order. This would save a load.
  1585     ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
  1587     // Check that this entry is non-null. A null entry means that
  1588     // the receiver class doesn't implement the interface, and wasn't the
  1589     // same as when the caller was compiled.
  1590     cmpd(CCR0, method_result, intf_klass);
  1592     if (peel) {
  1593       beq(CCR0, found_method);
  1594     } else {
  1595       bne(CCR0, search);
  1596       // (invert the test to fall through to found_method...)
  1597     }
  1599     if (!peel) break;
  1601     bind(search);
  1603     cmpdi(CCR0, method_result, 0);
  1604     beq(CCR0, L_no_such_interface);
  1605     addi(scan_temp, scan_temp, scan_step);
  1606   }
  1608   bind(found_method);
  1610   // Got a hit.
  1611   int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
  1612   lwz(scan_temp, ito_offset, scan_temp);
  1613   ldx(method_result, scan_temp, recv_klass);
  1614 }
  1616 // virtual method calling
  1617 void MacroAssembler::lookup_virtual_method(Register recv_klass,
  1618                                            RegisterOrConstant vtable_index,
  1619                                            Register method_result) {
  1621   assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
  1623   const int base = InstanceKlass::vtable_start_offset() * wordSize;
  1624   assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  1626   if (vtable_index.is_register()) {
  1627     sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
  1628     add(recv_klass, vtable_index.as_register(), recv_klass);
  1629   } else {
  1630     addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
  1631   }
  1632   ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
  1633 }
  1635 /////////////////////////////////////////// subtype checking ////////////////////////////////////////////
  1637 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
  1638                                                    Register super_klass,
  1639                                                    Register temp1_reg,
  1640                                                    Register temp2_reg,
  1641                                                    Label& L_success,
  1642                                                    Label& L_failure) {
  1644   const Register check_cache_offset = temp1_reg;
  1645   const Register cached_super       = temp2_reg;
  1647   assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
  1649   int sco_offset = in_bytes(Klass::super_check_offset_offset());
  1650   int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  1652   // If the pointers are equal, we are done (e.g., String[] elements).
  1653   // This self-check enables sharing of secondary supertype arrays among
  1654   // non-primary types such as array-of-interface. Otherwise, each such
  1655   // type would need its own customized SSA.
  1656   // We move this check to the front of the fast path because many
  1657   // type checks are in fact trivially successful in this manner,
  1658   // so we get a nicely predicted branch right at the start of the check.
  1659   cmpd(CCR0, sub_klass, super_klass);
  1660   beq(CCR0, L_success);
  1662   // Check the supertype display:
  1663   lwz(check_cache_offset, sco_offset, super_klass);
  1664   // The loaded value is the offset from KlassOopDesc.
  1666   ldx(cached_super, check_cache_offset, sub_klass);
  1667   cmpd(CCR0, cached_super, super_klass);
  1668   beq(CCR0, L_success);
  1670   // This check has worked decisively for primary supers.
  1671   // Secondary supers are sought in the super_cache ('super_cache_addr').
  1672   // (Secondary supers are interfaces and very deeply nested subtypes.)
  1673   // This works in the same check above because of a tricky aliasing
  1674   // between the super_cache and the primary super display elements.
  1675   // (The 'super_check_addr' can address either, as the case requires.)
  1676   // Note that the cache is updated below if it does not help us find
  1677   // what we need immediately.
  1678   // So if it was a primary super, we can just fail immediately.
  1679   // Otherwise, it's the slow path for us (no success at this point).
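  // The fast path so far in pseudo-code (a sketch of the surrounding code):
  //   if (sub_klass == super_klass)                            goto L_success;
  //   sco = super_klass->_super_check_offset;
  //   if (*(Klass**)((address)sub_klass + sco) == super_klass) goto L_success;
  //   if (sco != sc_offset)                                    goto L_failure;
  //   // else fall through to the slow path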
  1681   cmpwi(CCR0, check_cache_offset, sc_offset);
  1682   bne(CCR0, L_failure);
  1683   // bind(slow_path); // fallthru
  1684 }
  1686 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
  1687                                                    Register super_klass,
  1688                                                    Register temp1_reg,
  1689                                                    Register temp2_reg,
  1690                                                    Label* L_success,
  1691                                                    Register result_reg) {
  1692   const Register array_ptr = temp1_reg; // current value from cache array
  1693   const Register temp      = temp2_reg;
  1695   assert_different_registers(sub_klass, super_klass, array_ptr, temp);
  1697   int source_offset = in_bytes(Klass::secondary_supers_offset());
  1698   int target_offset = in_bytes(Klass::secondary_super_cache_offset());
  1700   int length_offset = Array<Klass*>::length_offset_in_bytes();
  1701   int base_offset   = Array<Klass*>::base_offset_in_bytes();
  1703   Label hit, loop, failure, fallthru;
  1705   ld(array_ptr, source_offset, sub_klass);
  1707   //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
  1708   lwz(temp, length_offset, array_ptr);
  1709   cmpwi(CCR0, temp, 0);
  1710   beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
  1712   mtctr(temp); // load ctr
  1714   bind(loop);
  1715   // Klass pointers in the table are no longer compressed.
  1716   ld(temp, base_offset, array_ptr);
  1717   cmpd(CCR0, temp, super_klass);
  1718   beq(CCR0, hit);
  1719   addi(array_ptr, array_ptr, BytesPerWord);
  1720   bdnz(loop);
  1722   bind(failure);
  1723   if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
  1724   b(fallthru);
  1726   bind(hit);
  1727   std(super_klass, target_offset, sub_klass); // save result to cache
  1728   if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
  1729   if (L_success != NULL) b(*L_success);
  1731   bind(fallthru);
  1732 }
  1734 // Try fast path, then go to slow one if not successful
  1735 void MacroAssembler::check_klass_subtype(Register sub_klass,
  1736                          Register super_klass,
  1737                          Register temp1_reg,
  1738                          Register temp2_reg,
  1739                          Label& L_success) {
  1740   Label L_failure;
  1741   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
  1742   check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
  1743   bind(L_failure); // Fallthru if not successful.
  1744 }
  1746 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
  1747                                               Register temp_reg,
  1748                                               Label& wrong_method_type) {
  1749   assert_different_registers(mtype_reg, mh_reg, temp_reg);
  1750   // Compare method type against that of the receiver.
  1751   load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
  1752   cmpd(CCR0, temp_reg, mtype_reg);
  1753   bne(CCR0, wrong_method_type);
  1754 }
  1756 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
  1757                                                    Register temp_reg,
  1758                                                    int extra_slot_offset) {
  1759   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
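  // In effect (sketch): offset = (arg_slot + extra_slot_offset) * stackElementSize,
  // folded at assembly time when arg_slot is a constant, otherwise computed
  // into temp_reg at run time.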
  1760   int stackElementSize = Interpreter::stackElementSize;
  1761   int offset = extra_slot_offset * stackElementSize;
  1762   if (arg_slot.is_constant()) {
  1763     offset += arg_slot.as_constant() * stackElementSize;
  1764     return offset;
  1765   } else {
  1766     assert(temp_reg != noreg, "must specify");
  1767     sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
  1768     if (offset != 0)
  1769       addi(temp_reg, temp_reg, offset);
  1770     return temp_reg;
  1771   }
  1772 }
  1774 void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
  1775                                           Register mark_reg, Register temp_reg,
  1776                                           Register temp2_reg, Label& done, Label* slow_case) {
  1777   assert(UseBiasedLocking, "why call this otherwise?");
  1779 #ifdef ASSERT
  1780   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
  1781 #endif
  1783   Label cas_label;
  1785   // Branch to done if fast path fails and no slow_case provided.
  1786   Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
  1788   // Biased locking
  1789   // See whether the lock is currently biased toward our thread and
  1790   // whether the epoch is still valid
  1791   // Note that the runtime guarantees sufficient alignment of JavaThread
  1792   // pointers to allow age to be placed into low bits
  1793   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
  1794          "biased locking makes assumptions about bit layout");
  1796   if (PrintBiasedLockingStatistics) {
  1797     load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
  1798     lwz(temp2_reg, 0, temp_reg);
  1799     addi(temp2_reg, temp2_reg, 1);
  1800     stw(temp2_reg, 0, temp_reg);
  1801   }
  1803   andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
  1804   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  1805   bne(cr_reg, cas_label);
  1807   load_klass(temp_reg, obj_reg);
  1809   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
  1810   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  1811   orr(temp_reg, R16_thread, temp_reg);
  1812   xorr(temp_reg, mark_reg, temp_reg);
  1813   andr(temp_reg, temp_reg, temp2_reg);
  1814   cmpdi(cr_reg, temp_reg, 0);
  1815   if (PrintBiasedLockingStatistics) {
  1816     Label l;
  1817     bne(cr_reg, l);
  1818     load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
  1819     lwz(temp2_reg, 0, mark_reg);
  1820     addi(temp2_reg, temp2_reg, 1);
  1821     stw(temp2_reg, 0, mark_reg);
  1822     // restore mark_reg
  1823     ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
  1824     bind(l);
  1825   }
  1826   beq(cr_reg, done);
  1828   Label try_revoke_bias;
  1829   Label try_rebias;
  1831   // At this point we know that the header has the bias pattern and
  1832   // that we are not the bias owner in the current epoch. We need to
  1833   // figure out more details about the state of the header in order to
  1834   // know what operations can be legally performed on the object's
  1835   // header.
  1837   // If the low three bits in the xor result aren't clear, that means
  1838   // the prototype header is no longer biased and we have to revoke
  1839   // the bias on this object.
  1840   andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  1841   cmpwi(cr_reg, temp2_reg, 0);
  1842   bne(cr_reg, try_revoke_bias);
  1844   // Biasing is still enabled for this data type. See whether the
  1845   // epoch of the current bias is still valid, meaning that the epoch
  1846   // bits of the mark word are equal to the epoch bits of the
  1847   // prototype header. (Note that the prototype header's epoch bits
  1848   // only change at a safepoint.) If not, attempt to rebias the object
  1849   // toward the current thread. Note that we must be absolutely sure
  1850   // that the current epoch is invalid in order to do this because
  1851   // otherwise the manipulations it performs on the mark word are
  1852   // illegal.
  1854   int shift_amount = 64 - markOopDesc::epoch_shift;
  1855   // rotate epoch bits to right (little) end and set other bits to 0
  1856   // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
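  // Sketch of the rldicl_ semantics used here: rotating by 64 - epoch_shift
  // is a right-rotate by epoch_shift, and the mask value 64 - epoch_bits
  // clears everything but the low-order epoch_bits bits, so temp2_reg
  // receives only the xored epoch field (CCR0 is set by the record form).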
  1857   rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
  1858   // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
  1859   bne(CCR0, try_rebias);
  1861   // The epoch of the current bias is still valid but we know nothing
  1862   // about the owner; it might be set or it might be clear. Try to
  1863   // acquire the bias of the object using an atomic operation. If this
  1864   // fails we will go in to the runtime to revoke the object's bias.
  1865   // Note that we first construct the presumed unbiased header so we
  1866   // don't accidentally blow away another thread's valid bias.
  1867   andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
  1868                                 markOopDesc::age_mask_in_place |
  1869                                 markOopDesc::epoch_mask_in_place));
  1870   orr(temp_reg, R16_thread, mark_reg);
  1872   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1874   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1875   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1876   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1877            /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1878            /*where=*/obj_reg,
  1879            MacroAssembler::MemBarAcq,
  1880            MacroAssembler::cmpxchgx_hint_acquire_lock(),
  1881            noreg, slow_case_int); // bail out if failed
  1883   // If the biasing toward our thread failed, this means that
  1884   // another thread succeeded in biasing it toward itself and we
  1885   // need to revoke that bias. The revocation will occur in the
  1886   // interpreter runtime in the slow case.
  1887   if (PrintBiasedLockingStatistics) {
  1888     load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
  1889     lwz(temp2_reg, 0, temp_reg);
  1890     addi(temp2_reg, temp2_reg, 1);
  1891     stw(temp2_reg, 0, temp_reg);
  1892   }
  1893   b(done);
  1895   bind(try_rebias);
  1896   // At this point we know the epoch has expired, meaning that the
  1897   // current "bias owner", if any, is actually invalid. Under these
  1898   // circumstances _only_, we are allowed to use the current header's
  1899   // value as the comparison value when doing the cas to acquire the
  1900   // bias in the current epoch. In other words, we allow transfer of
  1901   // the bias from one thread to another directly in this situation.
  1902   andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
  1903   orr(temp_reg, R16_thread, temp_reg);
  1904   load_klass(temp2_reg, obj_reg);
  1905   ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
  1906   orr(temp_reg, temp_reg, temp2_reg);
  1908   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1910   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1911   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1912   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1913                  /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1914                  /*where=*/obj_reg,
  1915                  MacroAssembler::MemBarAcq,
  1916                  MacroAssembler::cmpxchgx_hint_acquire_lock(),
  1917                  noreg, slow_case_int); // bail out if failed
  1919   // If the biasing toward our thread failed, this means that
  1920   // another thread succeeded in biasing it toward itself and we
  1921   // need to revoke that bias. The revocation will occur in the
  1922   // interpreter runtime in the slow case.
  1923   if (PrintBiasedLockingStatistics) {
  1924     load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
  1925     lwz(temp2_reg, 0, temp_reg);
  1926     addi(temp2_reg, temp2_reg, 1);
  1927     stw(temp2_reg, 0, temp_reg);
  1928   }
  1929   b(done);
  1931   bind(try_revoke_bias);
  1932   // The prototype mark in the klass doesn't have the bias bit set any
  1933   // more, indicating that objects of this data type are not supposed
  1934   // to be biased any more. We are going to try to reset the mark of
  1935   // this object to the prototype value and fall through to the
  1936   // CAS-based locking scheme. Note that if our CAS fails, it means
  1937   // that another thread raced us for the privilege of revoking the
  1938   // bias of this particular object, so it's okay to continue in the
  1939   // normal locking code.
  1940   load_klass(temp_reg, obj_reg);
  1941   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
  1942   andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
  1943   orr(temp_reg, temp_reg, temp2_reg);
  1945   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  1947   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
  1948   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
  1949   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
  1950                  /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
  1951                  /*where=*/obj_reg,
  1952                  MacroAssembler::MemBarAcq,
  1953                  MacroAssembler::cmpxchgx_hint_acquire_lock());
  1955   // reload markOop in mark_reg before continuing with lightweight locking
  1956   ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
  1958   // Fall through to the normal CAS-based lock, because no matter what
  1959   // the result of the above CAS, some thread must have succeeded in
  1960   // removing the bias bit from the object's header.
  1961   if (PrintBiasedLockingStatistics) {
  1962     Label l;
  1963     bne(cr_reg, l);
  1964     load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
  1965     lwz(temp2_reg, 0, temp_reg);
  1966     addi(temp2_reg, temp2_reg, 1);
  1967     stw(temp2_reg, 0, temp_reg);
  1968     bind(l);
  1969   }
  1971   bind(cas_label);
  1972 }
  1974 void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
  1975   // Check for biased locking unlock case, which is a no-op
  1976   // Note: we do not have to check the thread ID for two reasons.
  1977   // First, the interpreter checks for IllegalMonitorStateException at
  1978   // a higher level. Second, if the bias was revoked while we held the
  1979   // lock, the object could not be rebiased toward another thread, so
  1980   // the bias bit would be clear.
  1982   ld(temp_reg, 0, mark_addr);
  1983   andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  1985   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
  1986   beq(cr_reg, done);
  1987 }
  1989 // "The box" is the space on the stack where we copy the object mark.
  1990 void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
  1991                                                Register temp, Register displaced_header, Register current_header) {
  1992   assert_different_registers(oop, box, temp, displaced_header, current_header);
  1993   assert(flag != CCR0, "bad condition register");
  1994   Label cont;
  1995   Label object_has_monitor;
  1996   Label cas_failed;
  1998   // Load markOop from object into displaced_header.
  1999   ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
  2002   // Always do locking in runtime.
  2003   if (EmitSync & 0x01) {
  2004     cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
  2005     return;
  2006   }
  2008   if (UseBiasedLocking) {
  2009     biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
  2010   }
  2012   // Handle existing monitor.
  2013   if ((EmitSync & 0x02) == 0) {
  2014     // The object has an existing monitor iff (mark & monitor_value) != 0.
  2015     andi_(temp, displaced_header, markOopDesc::monitor_value);
  2016     bne(CCR0, object_has_monitor);
  2017   }
  2019   // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
  2020   ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
  2022   // Load Compare Value application register.
  2024   // Initialize the box. (Must happen before we update the object mark!)
  2025   std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
  2027   // Must fence, otherwise, preceding store(s) may float below cmpxchg.
  2028   // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
  2029   // CmpxchgX sets cr_reg to cmpX(current, displaced).
  2030   membar(Assembler::StoreStore);
  2031   cmpxchgd(/*flag=*/flag,
  2032            /*current_value=*/current_header,
  2033            /*compare_value=*/displaced_header,
  2034            /*exchange_value=*/box,
  2035            /*where=*/oop,
  2036            MacroAssembler::MemBarAcq,
  2037            MacroAssembler::cmpxchgx_hint_acquire_lock(),
  2038            noreg,
  2039            &cas_failed);
  2040   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  2042   // If the compare-and-exchange succeeded, then we found an unlocked
  2043   // object and we have now locked it.
  2044   b(cont);
  2046   bind(cas_failed);
  2047   // We did not see an unlocked object so try the fast recursive case.
  2049   // Check if the owner is self by comparing the value in the markOop of object
  2050   // (current_header) with the stack pointer.
  2051   sub(current_header, current_header, R1_SP);
  2052   load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
  2053                                         markOopDesc::lock_mask_in_place));
  2055   and_(R0/*==0?*/, current_header, temp);
  2056   // If the condition is true we can proceed to cont and hence store 0 as the
  2057   // displaced header in the box, which indicates that it is a recursive lock.
  2058   mcrf(flag,CCR0);
  2059   std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
  2061   // Handle existing monitor.
  2062   if ((EmitSync & 0x02) == 0) {
  2063     b(cont);
  2065     bind(object_has_monitor);
  2066     // The object's monitor m is unlocked iff m->owner == NULL,
  2067     // otherwise m->owner may contain a thread or a stack address.
  2068     //
  2069     // Try to CAS m->owner from NULL to current thread.
  2070     addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
  2071     li(displaced_header, 0);
  2072     // CmpxchgX sets flag to cmpX(current, displaced).
  2073     cmpxchgd(/*flag=*/flag,
  2074              /*current_value=*/current_header,
  2075              /*compare_value=*/displaced_header,
  2076              /*exchange_value=*/R16_thread,
  2077              /*where=*/temp,
  2078              MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
  2079              MacroAssembler::cmpxchgx_hint_acquire_lock());
  2081     // Store a non-null value into the box.
  2082     std(box, BasicLock::displaced_header_offset_in_bytes(), box);
  2084 #   ifdef ASSERT
  2085     bne(flag, cont);
  2086     // We have acquired the monitor, check some invariants.
  2087     addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
  2088     // Invariant 1: _recursions should be 0.
  2089     //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
  2090     asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
  2091                             "monitor->_recursions should be 0", -1);
  2092     // Invariant 2: OwnerIsThread shouldn't be 0.
  2093     //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
  2094     //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
  2095     //                           "monitor->OwnerIsThread shouldn't be 0", -1);
  2096 #   endif
  2097   }
  2099   bind(cont);
  2100   // flag == EQ indicates success
  2101   // flag == NE indicates failure
  2102 }
  2104 void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
  2105                                                  Register temp, Register displaced_header, Register current_header) {
  2106   assert_different_registers(oop, box, temp, displaced_header, current_header);
  2107   assert(flag != CCR0, "bad condition register");
  2108   Label cont;
  2109   Label object_has_monitor;
  2111   // Always do locking in runtime.
  2112   if (EmitSync & 0x01) {
  2113     cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
  2114     return;
  2115   }
  2117   if (UseBiasedLocking) {
  2118     biased_locking_exit(flag, oop, current_header, cont);
  2119   }
  2121   // Find the lock address and load the displaced header from the stack.
  2122   ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
  2124   // If the displaced header is 0, we have a recursive unlock.
  2125   cmpdi(flag, displaced_header, 0);
  2126   beq(flag, cont);
  2128   // Handle existing monitor.
  2129   if ((EmitSync & 0x02) == 0) {
  2130     // The object has an existing monitor iff (mark & monitor_value) != 0.
  2131     ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
  2132     andi(temp, current_header, markOopDesc::monitor_value);
  2133     cmpdi(flag, temp, 0);
  2134     bne(flag, object_has_monitor);
  2135   }
  2138   // Check if it is still a lightweight lock; this is true if we see
  2139   // the stack address of the basicLock in the markOop of the object.
  2140   // Cmpxchg sets flag to cmpd(current_header, box).
  2141   cmpxchgd(/*flag=*/flag,
  2142            /*current_value=*/current_header,
  2143            /*compare_value=*/box,
  2144            /*exchange_value=*/displaced_header,
  2145            /*where=*/oop,
  2146            MacroAssembler::MemBarRel,
  2147            MacroAssembler::cmpxchgx_hint_release_lock(),
  2148            noreg,
  2149            &cont);
  2151   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
  2153   // Handle existing monitor.
  2154   if ((EmitSync & 0x02) == 0) {
  2155     b(cont);
  2157     bind(object_has_monitor);
  2158     addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
  2159     ld(temp,             ObjectMonitor::owner_offset_in_bytes(), current_header);
  2160     ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
  2161     xorr(temp, R16_thread, temp);      // Will be 0 if we are the owner.
  2162     orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
  2163     cmpdi(flag, temp, 0);
  2164     bne(flag, cont);
  2166     ld(temp,             ObjectMonitor::EntryList_offset_in_bytes(), current_header);
  2167     ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
  2168     orr(temp, temp, displaced_header); // Will be 0 if both are 0.
  2169     cmpdi(flag, temp, 0);
  2170     bne(flag, cont);
  2171     release();
  2172     std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
  2173   }
  2175   bind(cont);
  2176   // flag == EQ indicates success
  2177   // flag == NE indicates failure
  2178 }
  2180 // Write serialization page so VM thread can do a pseudo remote membar.
  2181 // We use the current thread pointer to calculate a thread specific
  2182 // offset to write to within the page. This minimizes bus traffic
  2183 // due to cache line collision.
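// Net effect (sketch): each thread stores to its own word of the page,
//   offset = (thread >> serialize_page_shift_count) & (page_size - sizeof(int));
//   *(volatile int*)(serialize_page + offset) = <whatever is in R0>;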
  2184 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  2185   srdi(tmp2, thread, os::get_serialize_page_shift_count());
  2187   int mask = os::vm_page_size() - sizeof(int);
  2188   if (Assembler::is_simm(mask, 16)) {
  2189     andi(tmp2, tmp2, mask);
  2190   } else {
  2191     lis(tmp1, (int)((signed short) (mask >> 16)));
  2192     ori(tmp1, tmp1, mask & 0x0000ffff);
  2193     andr(tmp2, tmp2, tmp1);
  2194   }
  2196   load_const(tmp1, (long) os::get_memory_serialize_page());
  2197   release();
  2198   stwx(R0, tmp1, tmp2);
  2199 }
  2202 // GC barrier helper macros
  2204 // Write the card table byte if needed.
  2205 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
  2206   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
  2207   assert(bs->kind() == BarrierSet::CardTableModRef ||
  2208          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  2209 #ifdef ASSERT
  2210   cmpdi(CCR0, Rnew_val, 0);
  2211   asm_assert_ne("null oop not allowed", 0x321);
  2212 #endif
  2213   card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
  2214 }
  2216 // Write the card table byte.
  2217 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
  2218   assert_different_registers(Robj, Rtmp, R0);
  2219   load_const_optimized(Rtmp, (address)byte_map_base, R0);
  2220   srdi(Robj, Robj, CardTableModRefBS::card_shift);
  2221   li(R0, 0); // dirty
  2222   if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
  2223   stbx(R0, Rtmp, Robj);
  2224 }
  2226 #if INCLUDE_ALL_GCS
  2227 // General G1 pre-barrier generator.
  2228 // Goal: record the previous value if it is not null.
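// Fast path in pseudo-code (a sketch of the code below):
//   if (thread->satb_mark_queue.active) {
//     if (Robj != noreg) pre_val = *(Robj + offset);  // else preloaded by caller
//     if (pre_val != NULL) {
//       if (index != 0) buf[--index] = pre_val;       // thread-local SATB buffer
//       else            g1_wb_pre(pre_val, thread);   // runtime slow path
//     }
//   }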
  2229 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
  2230                                           Register Rtmp1, Register Rtmp2, bool needs_frame) {
  2231   Label runtime, filtered;
  2233   // Is marking active?
  2234   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
  2235     lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  2236   } else {
  2237     guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
  2238     lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
  2239   }
  2240   cmpdi(CCR0, Rtmp1, 0);
  2241   beq(CCR0, filtered);
  2243   // Do we need to load the previous value?
  2244   if (Robj != noreg) {
  2245     // Load the previous value...
  2246     if (UseCompressedOops) {
  2247       lwz(Rpre_val, offset, Robj);
  2248     } else {
  2249       ld(Rpre_val, offset, Robj);
  2250     }
  2251     // Previous value has been loaded into Rpre_val.
  2252   }
  2253   assert(Rpre_val != noreg, "must have a real register");
  2255   // Is the previous value null?
  2256   cmpdi(CCR0, Rpre_val, 0);
  2257   beq(CCR0, filtered);
  2259   if (Robj != noreg && UseCompressedOops) {
  2260     decode_heap_oop_not_null(Rpre_val);
  2261   }
  2263   // OK, it's not filtered, so we'll need to call enqueue. In the normal
  2264   // case, pre_val will be a scratch G-reg, but there are some cases in
  2265   // which it's an O-reg. In the first case, do a normal call. In the
  2266   // latter, do a save here and call the frameless version.
  2268   // Can we store original value in the thread's buffer?
  2269   // Is index == 0?
  2270   // (The index field is typed as size_t.)
  2271   const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  2273   ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  2274   cmpdi(CCR0, Rindex, 0);
  2275   beq(CCR0, runtime); // If index == 0, goto runtime.
  2276   ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
  2278   addi(Rindex, Rindex, -wordSize); // Decrement index.
  2279   std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  2281   // Record the previous value.
  2282   stdx(Rpre_val, Rbuffer, Rindex);
  2283   b(filtered);
  2285   bind(runtime);
  2287   // The VM call needs a frame so it can access (write) registers.
  2288   if (needs_frame) {
  2289     save_LR_CR(Rtmp1);
  2290     push_frame_reg_args(0, Rtmp2);
  2291   }
  2293   if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
  2294   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
  2295   if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
  2297   if (needs_frame) {
  2298     pop_frame();
  2299     restore_LR_CR(Rtmp1);
  2300   }
  2302   bind(filtered);
  2303 }
  2305 // General G1 post-barrier generator
  2306 // Store cross-region card.
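// In pseudo-code (sketch):
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return; // same region
//   card = &byte_map_base[store_addr >> card_shift];
//   if (*card == g1_young_card_val()) return;
//   StoreLoad_membar();
//   if (*card == dirty_card_val()) return;
//   *card = dirty_card_val();
//   if (index != 0) queue[--index] = card; else g1_wb_post(card, thread);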
  2307 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
  2308   Label runtime, filtered_int;
  2309   Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
  2310   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
  2312   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
  2313   assert(bs->kind() == BarrierSet::G1SATBCT ||
  2314          bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
  2316   // Does store cross heap regions?
  2317   if (G1RSBarrierRegionFilter) {
  2318     xorr(Rtmp1, Rstore_addr, Rnew_val);
  2319     srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  2320     beq(CCR0, filtered);
  2321   }
  2323   // Crosses regions, storing NULL?
  2324 #ifdef ASSERT
  2325   cmpdi(CCR0, Rnew_val, 0);
  2326   asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
  2327   //beq(CCR0, filtered);
  2328 #endif
  2330   // Storing region crossing non-NULL, is card already dirty?
  2331   assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
  2332   const Register Rcard_addr = Rtmp1;
  2333   Register Rbase = Rtmp2;
  2334   load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
  2336   srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
  2338   // Get the address of the card.
  2339   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
  2340   cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  2341   beq(CCR0, filtered);
  2343   membar(Assembler::StoreLoad);
  2344   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
  2345   cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
  2346   beq(CCR0, filtered);
  2348   // Storing a region crossing, non-NULL oop, card is clean.
  2349   // Dirty card and log.
  2350   li(Rtmp3, CardTableModRefBS::dirty_card_val());
  2351   //release(); // G1: oops are allowed to get visible after dirty marking.
  2352   stbx(Rtmp3, Rbase, Rcard_addr);
  2354   add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  2355   Rbase = noreg; // end of lifetime
  2357   const Register Rqueue_index = Rtmp2,
  2358                  Rqueue_buf   = Rtmp3;
  2359   ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  2360   cmpdi(CCR0, Rqueue_index, 0);
  2361   beq(CCR0, runtime); // If index == 0, jump to runtime.
  2362   ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
  2364   addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
  2365   std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
  2367   stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
  2368   b(filtered);
  2370   bind(runtime);
  2372   // Save the live input values.
  2373   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
  2375   bind(filtered_int);
  2376 }
  2377 #endif // INCLUDE_ALL_GCS
  2379 // Values for last_Java_pc, and last_Java_sp must comply to the rules
  2380 // in frame_ppc.hpp.
  2381 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
  2382   // Always set last_Java_pc and flags first because once last_Java_sp
  2383   // is visible, has_last_Java_frame is true and users will look at the
  2384   // rest of the fields. (Note: flags should always be zero before we
  2385   // get here, so they don't need to be set.)
  2387   // Verify that last_Java_pc was zeroed on return to Java
  2388   asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
  2389                           "last_Java_pc not zeroed before leaving Java", 0x200);
  2391   // When returning from calling out from Java mode the frame anchor's
  2392   // last_Java_pc will always be set to NULL. It is set here so that
  2393   // if we are doing a call to native (not VM) that we capture the
  2394   // known pc and don't have to rely on the native call having a
  2395   // standard frame linkage where we can find the pc.
  2396   if (last_Java_pc != noreg)
  2397     std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
  2399   // Set last_Java_sp last.
  2400   std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
  2401 }
  2403 void MacroAssembler::reset_last_Java_frame(void) {
  2404   asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
  2405                              R16_thread, "SP was not set, still zero", 0x202);
  2407   BLOCK_COMMENT("reset_last_Java_frame {");
  2408   li(R0, 0);
  2410   // _last_Java_sp = 0
  2411   std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
  2413   // _last_Java_pc = 0
  2414   std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
  2415   BLOCK_COMMENT("} reset_last_Java_frame");
  2416 }
  2418 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
  2419   assert_different_registers(sp, tmp1);
  2421   // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
  2422   // TOP_IJAVA_FRAME_ABI.
  2423   // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
  2424 #ifdef CC_INTERP
  2425   ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
  2426 #else
  2427   address entry = pc();
  2428   load_const_optimized(tmp1, entry);
  2429 #endif
  2431   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
  2432 }
  2434 void MacroAssembler::get_vm_result(Register oop_result) {
  2435   // Read:
  2436   //   R16_thread
  2437   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
  2438   //
  2439   // Updated:
  2440   //   oop_result
  2441   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
  2443   ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  2444   li(R0, 0);
  2445   std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  2447   verify_oop(oop_result);
  2448 }
  2450 void MacroAssembler::get_vm_result_2(Register metadata_result) {
  2451   // Read:
  2452   //   R16_thread
  2453   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
  2454   //
  2455   // Updated:
  2456   //   metadata_result
  2457   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
  2459   ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
  2460   li(R0, 0);
  2461   std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
  2462 }
  2465 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  2466   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
  2467   if (Universe::narrow_klass_base() != 0) {
  2468     // Use dst as temp if it is free.
  2469     load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
  2470     sub(dst, current, R0);
  2471     current = dst;
  2472   }
  2473   if (Universe::narrow_klass_shift() != 0) {
  2474     srdi(dst, current, Universe::narrow_klass_shift());
  2475     current = dst;
  2476   }
  2477   mr_if_needed(dst, current); // Move may be required.
  2478 }
  2480 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
  2481   if (UseCompressedClassPointers) {
  2482     encode_klass_not_null(ck, klass);
  2483     stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
  2484   } else {
  2485     std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
  2486   }
  2487 }
  2489 void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
  2490   if (UseCompressedClassPointers) {
  2491     if (val == noreg) {
  2492       val = R0;
  2493       li(val, 0);
  2494     }
  2495     stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
  2496   }
  2497 }
  2499 int MacroAssembler::instr_size_for_decode_klass_not_null() {
  2500   if (!UseCompressedClassPointers) return 0;
  2501   int num_instrs = 1;  // shift or move
  2502   if (Universe::narrow_klass_base() != 0) num_instrs = 7;  // shift + load const + add
  2503   return num_instrs * BytesPerInstWord;
  2504 }
  2506 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  2507   assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
  2508   if (src == noreg) src = dst;
  2509   Register shifted_src = src;
  2510   if (Universe::narrow_klass_shift() != 0 ||
  2511       Universe::narrow_klass_base() == 0 && src != dst) {  // Move required.
  2512     shifted_src = dst;
  2513     sldi(shifted_src, src, Universe::narrow_klass_shift());
  2514   }
  2515   if (Universe::narrow_klass_base() != 0) {
  2516     load_const(R0, Universe::narrow_klass_base());
  2517     add(dst, shifted_src, R0);
  2518   }
  2519 }
  2521 void MacroAssembler::load_klass(Register dst, Register src) {
  2522   if (UseCompressedClassPointers) {
  2523     lwz(dst, oopDesc::klass_offset_in_bytes(), src);
  2524     // Attention: no null check here!
  2525     decode_klass_not_null(dst, dst);
  2526   } else {
  2527     ld(dst, oopDesc::klass_offset_in_bytes(), src);
  2528   }
  2529 }
  2531 void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
  2532   if (!os::zero_page_read_protected()) {
  2533     if (TrapBasedNullChecks) {
  2534       trap_null_check(src);
  2535     }
  2536   }
  2537   load_klass(dst, src);
  2538 }
  2540 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
  2541   if (Universe::heap() != NULL) {
  2542     load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
  2543   } else {
  2544     // Heap not yet allocated. Load indirectly.
  2545     int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
  2546     ld(R30, simm16_offset, R30);
  2547   }
  2548 }
  2550 // Clear Array
  2551 // Kills both input registers. tmp == R0 is allowed.
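// Rough structure (a sketch; the code below also bails out to the simple
// loop when the region is too small to make dcbz worthwhile):
//   while (base_ptr not cache-line aligned) { *base_ptr++ = 0; cnt_dwords--; }  // startloop
//   while (cnt_dwords >= cl_dwords) { dcbz(base_ptr); base_ptr += cl_size; cnt_dwords -= cl_dwords; }  // fastloop
//   while (cnt_dwords-- > 0) { *base_ptr++ = 0; }  // restloop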
  2552 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
  2553   // Procedure for large arrays (uses data cache block zero instruction).
  2554     Label startloop, fast, fastloop, small_rest, restloop, done;
  2555     const int cl_size         = VM_Version::get_cache_line_size(),
  2556               cl_dwords       = cl_size>>3,
  2557               cl_dw_addr_bits = exact_log2(cl_dwords),
  2558               dcbz_min        = 1;                     // Min count of dcbz executions, needs to be >0.
  2560 //2:
  2561     cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
  2562     blt(CCR1, small_rest);                                      // Too small.
  2563     rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits);           // Extract dword offset within first cache line.
  2564     beq(CCR0, fast);                                            // Already 128byte aligned.
  2566     subfic(tmp, tmp, cl_dwords);
  2567     mtctr(tmp);                        // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
  2568     subf(cnt_dwords, tmp, cnt_dwords); // rest.
  2569     li(tmp, 0);
  2570 //10:
  2571   bind(startloop);                     // Clear at the beginning to reach 128byte boundary.
  2572     std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
  2573     addi(base_ptr, base_ptr, 8);
  2574     bdnz(startloop);
  2575 //13:
  2576   bind(fast);                                  // Clear 128byte blocks.
  2577     srdi(tmp, cnt_dwords, cl_dw_addr_bits);    // Loop count for 128byte loop (>0).
  2578     andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
  2579     mtctr(tmp);                                // Load counter.
  2580 //16:
  2581   bind(fastloop);
  2582     dcbz(base_ptr);                    // Clear 128byte aligned block.
  2583     addi(base_ptr, base_ptr, cl_size);
  2584     bdnz(fastloop);
  2585     if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
  2586 //20:
  2587   bind(small_rest);
  2588     cmpdi(CCR0, cnt_dwords, 0);        // size 0?
  2589     beq(CCR0, done);                   // rest == 0
  2590     li(tmp, 0);
  2591     mtctr(cnt_dwords);                 // Load counter.
  2592 //24:
  2593   bind(restloop);                      // Clear rest.
  2594     std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
  2595     addi(base_ptr, base_ptr, 8);
  2596     bdnz(restloop);
  2597 //27:
  2598   bind(done);
  2599 }
  2601 /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
  2603 // Search for a single jchar in a jchar[].
  2604 //
  2605 // Assumes that result differs from all other registers.
  2606 //
  2607 // Haystack, needle are the addresses of jchar-arrays.
  2608 // NeedleChar is needle[0] if it is known at compile time.
  2609 // Haycnt is the length of the haystack. We assume haycnt >=1.
  2610 //
  2611 // Preserves haystack, haycnt, kills all other registers.
  2612 //
  2613 // If needle == R0, we search for the constant needleChar.
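// Conceptually (sketch):
//   for (int i = 0; i < haycnt; i++)   // 2x unrolled below
//     if (haystack[i] == needle[0]) { result = i; goto end; }
//   result = -1;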
  2614 void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
  2615                                       Register needle, jchar needleChar,
  2616                                       Register tmp1, Register tmp2) {
  2618   assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
  2620   Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
  2621   Register needle0 = needle, // Contains needle[0].
  2622            addr = tmp1,
  2623            ch1 = tmp2,
  2624            ch2 = R0;
  2626 //2 (variable) or 3 (const):
  2627    if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
  2628    dcbtct(haystack, 0x00);                        // Indicate R/O access to haystack.
  2630    srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
  2631    mr(addr, haystack);
  2632    beq(CCR0, L_FinalCheck);
  2633    mtctr(tmp2);              // Move to count register.
  2634 //8:
  2635   bind(L_InnerLoop);             // Main workhorse (2x unrolled search loop).
  2636    lhz(ch1, 0, addr);        // Load characters from haystack.
  2637    lhz(ch2, 2, addr);
  2638    (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
  2639    (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
  2640    beq(CCR0, L_Found1);   // Did we find the needle?
  2641    beq(CCR1, L_Found2);
  2642    addi(addr, addr, 4);
  2643    bdnz(L_InnerLoop);
  2644 //16:
  2645   bind(L_FinalCheck);
  2646    andi_(R0, haycnt, 1);
  2647    beq(CCR0, L_NotFound);
  2648    lhz(ch1, 0, addr);        // One position left at which we have to compare.
  2649    (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
  2650    beq(CCR1, L_Found3);
  2651 //21:
  2652   bind(L_NotFound);
  2653    li(result, -1);           // Not found.
  2654    b(L_End);
  2656   bind(L_Found2);
  2657    addi(addr, addr, 2);
  2658 //24:
  2659   bind(L_Found1);
  2660   bind(L_Found3);                  // Return index ...
  2661    subf(addr, haystack, addr); // relative to haystack,
  2662    srdi(result, addr, 1);      // in characters.
  2663   bind(L_End);
  2664 }
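       // A minimal C++ reference sketch of the search above (the emitted loop is
       // 2x unrolled). The function name and the #if 0 guard are illustrative only.
       // Assumes haycnt >= 1, as stated in the header comment.
       #if 0
       static int string_indexof_1_reference(const jchar* haystack, int haycnt, jchar needle) {
         for (int i = 0; i < haycnt; ++i) {
           if (haystack[i] == needle) return i; // index in characters
         }
         return -1; // not found
       }
       #endif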
  2667 // Implementation of IndexOf for jchar arrays.
  2668 //
  2669 // The lengths of haystack and needle are not constant, i.e. they are passed in registers.
  2670 //
  2671 // Preserves registers haystack, needle.
  2672 // Kills registers haycnt, needlecnt.
  2673 // Assumes that result differs from all other registers.
  2674 // Haystack, needle are the addresses of jchar-arrays.
  2675 // Haycnt, needlecnt are the lengths of them, respectively.
  2676 //
  2677 // Needlecntval must be zero or a 15-bit unsigned immediate greater than 1.
  2678 void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
  2679                                     Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
  2680                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  2682   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
  2683   Label L_TooShort, L_Found, L_NotFound, L_End;
  2684   Register last_addr = haycnt, // Kill haycnt at the beginning.
  2685            addr      = tmp1,
  2686            n_start   = tmp2,
  2687            ch1       = tmp3,
  2688            ch2       = R0;
  2690   // **************************************************************************************************
  2691   // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
  2692   // **************************************************************************************************
  2694 //1 (variable) or 3 (const):
  2695    dcbtct(needle, 0x00);    // Indicate R/O access to needle.
  2696    dcbtct(haystack, 0x00);  // Indicate R/O access to haystack.
  2698   // Compute last haystack addr to use if no match gets found.
  2699   if (needlecntval == 0) { // variable needlecnt
  2700 //3:
  2701    subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
  2702    addi(addr, haystack, -2);          // Accesses use pre-increment.
  2703    cmpwi(CCR6, needlecnt, 2);
  2704    blt(CCR6, L_TooShort);          // Variable needlecnt: handle short needle separately.
  2705    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
  2706    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
  2707    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
  2708    addi(needlecnt, needlecnt, -2);    // Rest of needle.
  2709   } else { // constant needlecnt
  2710   guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
  2711   assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
  2712 //5:
  2713    addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
  2714    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
  2715    addi(addr, haystack, -2);          // Accesses use pre-increment.
  2716    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
  2717    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
  2718    li(needlecnt, needlecntval-2);     // Rest of needle.
  2719   }
  2721   // Main Loop (now we have at least 3 characters).
  2722 //11:
  2723   Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
  2724   bind(L_OuterLoop); // Search for 1st 2 characters.
  2725   Register addr_diff = tmp4;
  2726    subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
  2727    addi(addr, addr, 2);              // This is the new address we want to use for comparing.
  2728    srdi_(ch2, addr_diff, 2);
  2729    beq(CCR0, L_FinalCheck);       // 2 characters left?
  2730    mtctr(ch2);                       // addr_diff/4
  2731 //16:
  2732   bind(L_InnerLoop);                // Main work horse (2x unrolled search loop)
  2733    lwz(ch1, 0, addr);           // Load 2 characters of haystack (ignore alignment).
  2734    lwz(ch2, 2, addr);
  2735    cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
  2736    cmpw(CCR1, ch2, n_start);
  2737    beq(CCR0, L_Comp1);       // Did we find the needle start?
  2738    beq(CCR1, L_Comp2);
  2739    addi(addr, addr, 4);
  2740    bdnz(L_InnerLoop);
  2741 //24:
  2742   bind(L_FinalCheck);
  2743    rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
  2744    beq(CCR0, L_NotFound);
  2745    lwz(ch1, 0, addr);                       // One position left at which we have to compare.
  2746    cmpw(CCR1, ch1, n_start);
  2747    beq(CCR1, L_Comp3);
  2748 //29:
  2749   bind(L_NotFound);
  2750    li(result, -1); // not found
  2751    b(L_End);
  2754    // **************************************************************************************************
  2755    // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
  2756    // **************************************************************************************************
  2757 //31:
  2758  if ((needlecntval >> 1) != 1) { // Skip this block if const needlecnt is 2 or 3 (reduces code size).
  2759   int nopcnt = 5;
  2760   if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
  2761   if (needlecntval == 0) {         // We have to handle these cases separately.
  2762   Label L_OneCharLoop;
  2763   bind(L_TooShort);
  2764    mtctr(haycnt);
  2765    lhz(n_start, 0, needle);    // First character of needle
  2766   bind(L_OneCharLoop);
  2767    lhzu(ch1, 2, addr);
  2768    cmpw(CCR1, ch1, n_start);
  2769    beq(CCR1, L_Found);      // Did we find the one character needle?
  2770    bdnz(L_OneCharLoop);
  2771    li(result, -1);             // Not found.
  2772    b(L_End);
  2773   } // 8 instructions, so no impact on alignment.
  2774   for (int x = 0; x < nopcnt; ++x) nop();
  2775  }
  2777   // **************************************************************************************************
  2778   // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
  2779   // **************************************************************************************************
  2781   // Compare the rest
  2782 //36 if needlecntval==0, else 37:
  2783   bind(L_Comp2);
  2784    addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
  2785   bind(L_Comp1);            // Addr points to possible needle start.
  2786   bind(L_Comp3);            // Could have created a copy and used a different return address, but we save code size here.
  2787   if (needlecntval != 2) {  // Const needlecnt==2?
  2788    if (needlecntval != 3) {
  2789     if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
  2790     Register ind_reg = tmp4;
  2791     li(ind_reg, 2*2);   // First 2 characters are already compared, use index 2.
  2792     mtctr(needlecnt);   // Decremented by 2, still > 0.
  2793 //40:
  2794    Label L_CompLoop;
  2795    bind(L_CompLoop);
  2796     lhzx(ch2, needle, ind_reg);
  2797     lhzx(ch1, addr, ind_reg);
  2798     cmpw(CCR1, ch1, ch2);
  2799     bne(CCR1, L_OuterLoop);
  2800     addi(ind_reg, ind_reg, 2);
  2801     bdnz(L_CompLoop);
  2802    } else { // No loop required if there's only one needle character left.
  2803     lhz(ch2, 2*2, needle);
  2804     lhz(ch1, 2*2, addr);
  2805     cmpw(CCR1, ch1, ch2);
  2806     bne(CCR1, L_OuterLoop);
  2807    }
  2808   }
  2809   // Return index ...
  2810 //46:
  2811   bind(L_Found);
  2812    subf(addr, haystack, addr); // relative to haystack, ...
  2813    srdi(result, addr, 1);      // in characters.
  2814 //48:
  2815   bind(L_End);
  2816 }
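       // A minimal C++ reference sketch of the search above: the emitted code
       // first matches the leading two needle characters, then compares the rest.
       // The function name and the #if 0 guard are illustrative only. Assumes
       // 0 < needlecnt <= haycnt, as required by the prerequisite comment.
       #if 0
       static int string_indexof_reference(const jchar* haystack, int haycnt,
                                           const jchar* needle, int needlecnt) {
         for (int i = 0; i + needlecnt <= haycnt; ++i) {
           int j = 0;
           while (j < needlecnt && haystack[i + j] == needle[j]) ++j;
           if (j == needlecnt) return i; // index in characters
         }
         return -1; // not found
       }
       #endif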
  2818 // Implementation of Compare for jchar arrays.
  2819 //
  2820 // Kills the registers str1, str2, cnt1, cnt2.
  2821 // Kills cr0, ctr.
  2822 // Assumes that result differs from the input registers.
  2823 void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
  2824                                     Register result_reg, Register tmp_reg) {
  2825    assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
  2827    Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
  2828    Register cnt_diff = R0,
  2829             limit_reg = cnt1_reg,
  2830             chr1_reg = result_reg,
  2831             chr2_reg = cnt2_reg,
  2832             addr_diff = str2_reg;
  2834    // Offset 0 should be 32 byte aligned.
  2835 //-4:
  2836     dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  2837     dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  2838 //-2:
  2839    // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
  2840     subf(result_reg, cnt2_reg, cnt1_reg);  // difference between cnt1/2
  2841     subf_(addr_diff, str1_reg, str2_reg);  // alias?
  2842     beq(CCR0, Ldone);                   // Return length difference if both strings alias (str1 == str2).
  2843     srawi(limit_reg, result_reg, 31);      // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
  2844     mr(cnt_diff, result_reg);
  2845     andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
  2846     add_(limit_reg, cnt2_reg, limit_reg);  // min(cnt1, cnt2)==0?
  2847     beq(CCR0, Ldone);                   // return cnt difference if one has 0 length
  2849     lhz(chr1_reg, 0, str1_reg);            // optional: early out if first characters mismatch
  2850     lhzx(chr2_reg, str1_reg, addr_diff);   // optional: early out if first characters mismatch
  2851     addi(tmp_reg, limit_reg, -1);          // min(cnt1, cnt2)-1
  2852     subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
  2853     bne(CCR0, Ldone);                   // optional: early out if first characters mismatch
  2855    // Set loop counter by scaling down tmp_reg
  2856     srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
  2857     ble(CCR0, Lslow_case);                 // need >4 characters for fast loop
  2858     andi(limit_reg, tmp_reg, 4-1);            // remaining characters
  2860    // Adapt str1_reg and str2_reg for the first loop iteration
  2861     mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
  2862     addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
  2863 //16:
  2864    // Compare the rest of the characters
  2865    bind(Lfast_loop);
  2866     ld(chr1_reg, 0, str1_reg);
  2867     ldx(chr2_reg, str1_reg, addr_diff);
  2868     cmpd(CCR0, chr2_reg, chr1_reg);
  2869     bne(CCR0, Lslow_case); // return chr1_reg
  2870     addi(str1_reg, str1_reg, 4*2);
  2871     bdnz(Lfast_loop);
  2872     addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
  2873 //23:
  2874    bind(Lslow_case);
  2875     mtctr(limit_reg);
  2876 //24:
  2877    bind(Lslow_loop);
  2878     lhz(chr1_reg, 0, str1_reg);
  2879     lhzx(chr2_reg, str1_reg, addr_diff);
  2880     subf_(result_reg, chr2_reg, chr1_reg);
  2881     bne(CCR0, Ldone); // return chr1_reg
  2882     addi(str1_reg, str1_reg, 1*2);
  2883     bdnz(Lslow_loop);
  2884 //30:
  2885    // If strings are equal up to min length, return the length difference.
  2886     mr(result_reg, cnt_diff);
  2887     nop(); // alignment
  2888 //32:
  2889    // Otherwise, return the difference between the first mismatched chars.
  2890    bind(Ldone);
  2891 }
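       // A minimal C++ reference sketch of the comparison above, including the
       // branch-free signmask min() used at the top. The function name and the
       // #if 0 guard are illustrative only.
       #if 0
       static int string_compare_reference(const jchar* str1, int cnt1,
                                           const jchar* str2, int cnt2) {
         int cnt_diff = cnt1 - cnt2;
         int mask     = cnt_diff >> 31;            // signmask: -1 if cnt1 < cnt2, else 0
         int min_cnt  = cnt2 + (cnt_diff & mask);  // min(cnt1, cnt2) without a branch
         for (int i = 0; i < min_cnt; ++i) {
           if (str1[i] != str2[i]) return (int)str1[i] - (int)str2[i]; // first mismatch
         }
         return cnt_diff; // equal up to min length: return the length difference
       }
       #endif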
  2894 // Compare char[] arrays.
  2895 //
  2896 // str1_reg   USE only
  2897 // str2_reg   USE only
  2898 // cnt_reg    USE_DEF, due to tmp reg shortage
  2899 // result_reg DEF only, might compromise USE only registers
  2900 void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
  2901                                         Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
  2902                                         Register tmp5_reg) {
  2904   // Str1 may be the same register as str2, which can occur, e.g., after scalar replacement.
  2905   assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
  2906   assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
  2908   // Offset 0 should be 32 byte aligned.
  2909   Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
  2910   Register index_reg = tmp5_reg;
  2911   Register cbc_iter  = tmp4_reg;
  2913 //-1:
  2914   dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  2915   dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  2916 //1:
  2917   andi(cbc_iter, cnt_reg, 4-1);            // Remaining iterations after 4 java characters per iteration loop.
  2918   li(index_reg, 0); // init
  2919   li(result_reg, 0); // assume false
  2920   srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
  2922   cmpwi(CCR1, cbc_iter, 0);             // CCR1 = (cbc_iter==0)
  2923   beq(CCR0, Linit_cbc);                 // too short
  2924     mtctr(tmp2_reg);
  2925 //8:
  2926     bind(Lloop);
  2927       ldx(tmp1_reg, str1_reg, index_reg);
  2928       ldx(tmp2_reg, str2_reg, index_reg);
  2929       cmpd(CCR0, tmp1_reg, tmp2_reg);
  2930       bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  2931       addi(index_reg, index_reg, 4*sizeof(jchar));
  2932       bdnz(Lloop);
  2933 //14:
  2934   bind(Linit_cbc);
  2935   beq(CCR1, Ldone_true);
  2936     mtctr(cbc_iter);
  2937 //16:
  2938     bind(Lcbc);
  2939       lhzx(tmp1_reg, str1_reg, index_reg);
  2940       lhzx(tmp2_reg, str2_reg, index_reg);
  2941       cmpw(CCR0, tmp1_reg, tmp2_reg);
  2942       bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  2943       addi(index_reg, index_reg, 1*sizeof(jchar));
  2944       bdnz(Lcbc);
  2945     nop();
  2946   bind(Ldone_true);
  2947   li(result_reg, 1);
  2948 //24:
  2949   bind(Ldone_false);
  2950 }
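       // A minimal C++ reference sketch of the equality check above: the emitted
       // main loop compares four jchars (one 8-byte word) per iteration, then the
       // remainder one jchar at a time. Name and #if 0 guard are illustrative only.
       #if 0
       static int char_arrays_equals_reference(const jchar* str1, const jchar* str2, int cnt) {
         for (int i = 0; i < cnt; ++i) {
           if (str1[i] != str2[i]) return 0; // result_reg = 0 (false)
         }
         return 1; // result_reg = 1 (true)
       }
       #endif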
  2953 void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
  2954                                            Register tmp1_reg, Register tmp2_reg) {
  2955   // Str1 may be the same register as str2, which can occur, e.g., after scalar replacement.
  2956   assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
  2957   assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
  2958   assert(sizeof(jchar) == 2, "must be");
  2959   assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
  2961   Label Ldone_false;
  2963   if (cntval < 16) { // short case
  2964     if (cntval != 0) li(result_reg, 0); // assume false
  2966     const int num_bytes = cntval*sizeof(jchar);
  2967     int index = 0;
  2968     for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
  2969       ld(tmp1_reg, index, str1_reg);
  2970       ld(tmp2_reg, index, str2_reg);
  2971       cmpd(CCR0, tmp1_reg, tmp2_reg);
  2972       bne(CCR0, Ldone_false);
  2973     }
  2974     if (cntval & 2) {
  2975       lwz(tmp1_reg, index, str1_reg);
  2976       lwz(tmp2_reg, index, str2_reg);
  2977       cmpw(CCR0, tmp1_reg, tmp2_reg);
  2978       bne(CCR0, Ldone_false);
  2979       index += 4;
  2980     }
  2981     if (cntval & 1) {
  2982       lhz(tmp1_reg, index, str1_reg);
  2983       lhz(tmp2_reg, index, str2_reg);
  2984       cmpw(CCR0, tmp1_reg, tmp2_reg);
  2985       bne(CCR0, Ldone_false);
  2986     }
  2987     // fallthrough: true
  2988   } else {
  2989     Label Lloop;
  2990     Register index_reg = tmp1_reg;
  2991     const int loopcnt = cntval/4;
  2992     assert(loopcnt > 0, "must be");
  2993     // Offset 0 should be 32 byte aligned.
  2994     //2:
  2995     dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
  2996     dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
  2997     li(tmp2_reg, loopcnt);
  2998     li(index_reg, 0); // init
  2999     li(result_reg, 0); // assume false
  3000     mtctr(tmp2_reg);
  3001     //8:
  3002     bind(Lloop);
  3003     ldx(R0, str1_reg, index_reg);
  3004     ldx(tmp2_reg, str2_reg, index_reg);
  3005     cmpd(CCR0, R0, tmp2_reg);
  3006     bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
  3007     addi(index_reg, index_reg, 4*sizeof(jchar));
  3008     bdnz(Lloop);
  3009     //14:
  3010     if (cntval & 2) {
  3011       lwzx(R0, str1_reg, index_reg);
  3012       lwzx(tmp2_reg, str2_reg, index_reg);
  3013       cmpw(CCR0, R0, tmp2_reg);
  3014       bne(CCR0, Ldone_false);
  3015       if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
  3016     }
  3017     if (cntval & 1) {
  3018       lhzx(R0, str1_reg, index_reg);
  3019       lhzx(tmp2_reg, str2_reg, index_reg);
  3020       cmpw(CCR0, R0, tmp2_reg);
  3021       bne(CCR0, Ldone_false);
  3022     }
  3023     // fallthrough: true
  3024   }
  3025   li(result_reg, 1);
  3026   bind(Ldone_false);
  3027 }
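       // A minimal C++ reference sketch of the constant-count variant above:
       // 8-byte compares for the bulk, then at most one 4-byte (cntval & 2) and
       // one 2-byte (cntval & 1) compare for the residue. The function name and
       // the #if 0 guard are illustrative only.
       #if 0
       static int char_arrays_equalsImm_reference(const jchar* str1, const jchar* str2, int cntval) {
         int i = 0;
         for (; i + 4 <= cntval; i += 4) {   // 8-byte (4-jchar) compares
           for (int j = 0; j < 4; ++j) {
             if (str1[i + j] != str2[i + j]) return 0;
           }
         }
         if (cntval & 2) {                   // one 4-byte (2-jchar) compare
           if (str1[i] != str2[i] || str1[i + 1] != str2[i + 1]) return 0;
           i += 2;
         }
         if (cntval & 1) {                   // one 2-byte (1-jchar) compare
           if (str1[i] != str2[i]) return 0;
         }
         return 1;
       }
       #endif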
  3030 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  3031 #ifdef ASSERT
  3032   Label ok;
  3033   if (check_equal) {
  3034     beq(CCR0, ok);
  3035   } else {
  3036     bne(CCR0, ok);
  3037   }
  3038   stop(msg, id);
  3039   bind(ok);
  3040 #endif
  3041 }
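       // Hypothetical usage sketch (the register, message, and id are made up):
       // the caller must set CCR0 with a compare first, since asm_assert() only
       // branches on the existing condition register state, e.g.:
       //   cmpdi(CCR0, R0, 0);
       //   asm_assert(/*check_equal=*/true, "R0 must be zero here", 0x123);
       // asm_assert_mems_zero() below wraps exactly this compare-then-assert pattern.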
  3043 void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
  3044                                           Register mem_base, const char* msg, int id) {
  3045 #ifdef ASSERT
  3046   switch (size) {
  3047     case 4:
  3048       lwz(R0, mem_offset, mem_base);
  3049       cmpwi(CCR0, R0, 0);
  3050       break;
  3051     case 8:
  3052       ld(R0, mem_offset, mem_base);
  3053       cmpdi(CCR0, R0, 0);
  3054       break;
  3055     default:
  3056       ShouldNotReachHere();
  3057   }
  3058   asm_assert(check_equal, msg, id);
  3059 #endif // ASSERT
  3060 }
  3062 void MacroAssembler::verify_thread() {
  3063   if (VerifyThread) {
  3064     unimplemented("'VerifyThread' currently not implemented on PPC");
  3065   }
  3066 }
  3068 // Reads oop; kills R0 and possibly volatile floating-point registers.
  3069 void MacroAssembler::verify_oop(Register oop, const char* msg) {
  3070   if (!VerifyOops) {
  3071     return;
  3072   }
  3074   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
  3075   const Register tmp = R11; // Will be preserved.
  3076   const int nbytes_save = 11*8; // Volatile gprs except R0.
  3077   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  3079   if (oop == tmp) mr(R4_ARG2, oop);
  3080   save_LR_CR(tmp); // save in old frame
  3081   push_frame_reg_args(nbytes_save, tmp);
  3082   // load FunctionDescriptor** / entry_address *
  3083   load_const_optimized(tmp, fd, R0);
  3084   // load FunctionDescriptor* / entry_address
  3085   ld(tmp, 0, tmp);
  3086   if (oop != tmp) mr_if_needed(R4_ARG2, oop);
  3087   load_const_optimized(R3_ARG1, (address)msg, R0);
  3088   // Call destination for its side effect.
  3089   call_c(tmp);
  3091   pop_frame();
  3092   restore_LR_CR(tmp);
  3093   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  3094 }
  3096 const char* stop_types[] = {
  3097   "stop",
  3098   "untested",
  3099   "unimplemented",
  3100   "shouldnotreachhere"
  3101 };
  3103 static void stop_on_request(int tp, const char* msg) {
  3104   tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
  3105   guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
  3106 }
  3108 // Call a C-function that prints output.
  3109 void MacroAssembler::stop(int type, const char* msg, int id) {
  3110 #ifndef PRODUCT
  3111   block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
  3112 #else
  3113   block_comment("stop {");
  3114 #endif
  3116   // setup arguments
  3117   load_const_optimized(R3_ARG1, type);
  3118   load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
  3119   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
  3120   illtrap();
  3121   emit_int32(id);
  3122   block_comment("} stop;");
  3123 }
  3125 #ifndef PRODUCT
  3126 // Write pattern 0x0101010101010101 in memory region [low-before, high+after].
  3127 // Val, addr are temp registers.
  3128 // If low == addr, addr is killed.
  3129 // High is preserved.
  3130 void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
  3131   if (!ZapMemory) return;
  3133   assert_different_registers(low, val);
  3135   BLOCK_COMMENT("zap memory region {");
  3136   load_const_optimized(val, 0x0101010101010101);
  3137   int size = before + after;
  3138   if (low == high && size < 5 && size > 0) {
  3139     int offset = -before*BytesPerWord;
  3140     for (int i = 0; i < size; ++i) {
  3141       std(val, offset, low);
  3142       offset += (1*BytesPerWord);
  3143     }
  3144   } else {
  3145     addi(addr, low, -before*BytesPerWord);
  3146     assert_different_registers(high, val);
  3147     if (after) addi(high, high, after * BytesPerWord);
  3148     Label loop;
  3149     bind(loop);
  3150     std(val, 0, addr);
  3151     addi(addr, addr, 8);
  3152     cmpd(CCR6, addr, high);
  3153     ble(CCR6, loop);
  3154     if (after) addi(high, high, -after * BytesPerWord);  // Correct back to old value.
  3155   }
  3156   BLOCK_COMMENT("} zap memory region");
  3157 }
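       // A minimal C++ reference sketch of the zap above, assuming 64-bit words
       // (BytesPerWord == 8); the function name and the #if 0 guard are
       // illustrative only.
       #if 0
       static void zap_from_to_reference(jlong* low, int before, jlong* high, int after) {
         const jlong val = 0x0101010101010101LL;
         for (jlong* p = low - before; p <= high + after; ++p) {
           *p = val; // one 8-byte store per word, region [low-before, high+after]
         }
       }
       #endif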
  3159 #endif // !PRODUCT
  3161 SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
  3162   int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
  3163   assert(sizeof(bool) == 1, "PowerPC ABI");
  3164   masm->lbz(temp, simm16_offset, temp);
  3165   masm->cmpwi(CCR0, temp, 0);
  3166   masm->beq(CCR0, _label);
  3167 }
  3169 SkipIfEqualZero::~SkipIfEqualZero() {
  3170   _masm->bind(_label);
  3171 }
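       // Hypothetical usage sketch (the flag name is made up): as an RAII guard,
       // the constructor emits the load/compare/branch and the destructor binds
       // the target, so the enclosed instructions only execute when *flag_addr
       // reads non-zero.
       #if 0
       {
         SkipIfEqualZero skip_if_off(this, R11, &SomeDiagnosticFlag);
         // ... code emitted here is skipped at runtime when the flag is zero ...
       } // ~SkipIfEqualZero binds the skip label here
       #endif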
