src/cpu/ppc/vm/macroAssembler_ppc.hpp

author:      goetz
date:        Thu, 07 Nov 2013 11:47:11 +0100
changeset:   6477:eb178e97560c
parent:      6458:ec28f9c041ff
child:       6495:67fa91961822
permissions: -rw-r--r--

8027968: Adapt PPC to 8024927: Nashorn performance regression with CompressedOops
Reviewed-by: coleenp, kvn

goetz@6458 1 /*
goetz@6458 2 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
goetz@6458 3 * Copyright 2012, 2013 SAP AG. All rights reserved.
goetz@6458 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
goetz@6458 5 *
goetz@6458 6 * This code is free software; you can redistribute it and/or modify it
goetz@6458 7 * under the terms of the GNU General Public License version 2 only, as
goetz@6458 8 * published by the Free Software Foundation.
goetz@6458 9 *
goetz@6458 10 * This code is distributed in the hope that it will be useful, but WITHOUT
goetz@6458 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
goetz@6458 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
goetz@6458 13 * version 2 for more details (a copy is included in the LICENSE file that
goetz@6458 14 * accompanied this code).
goetz@6458 15 *
goetz@6458 16 * You should have received a copy of the GNU General Public License version
goetz@6458 17 * 2 along with this work; if not, write to the Free Software Foundation,
goetz@6458 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
goetz@6458 19 *
goetz@6458 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
goetz@6458 21 * or visit www.oracle.com if you need additional information or have any
goetz@6458 22 * questions.
goetz@6458 23 *
goetz@6458 24 */
goetz@6458 25
goetz@6458 26 #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
goetz@6458 27 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
goetz@6458 28
goetz@6458 29 #include "asm/assembler.hpp"
goetz@6458 30
goetz@6458 31 // MacroAssembler extends Assembler by a few frequently used macros.
goetz@6458 32
goetz@6458 33 class ciTypeArray;
goetz@6458 34
goetz@6458 35 class MacroAssembler: public Assembler {
goetz@6458 36 public:
goetz@6458 37 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
goetz@6458 38
goetz@6458 39 //
goetz@6458 40 // Optimized instruction emitters
goetz@6458 41 //
goetz@6458 42
goetz@6458 43 inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
goetz@6458 44 inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
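// Illustrative example (not part of the interface): the two helpers split a
// 31-bit offset into a high and a low 16-bit part such that
// (hi << 16) + lo == si31 and lo remains a valid signed 16-bit immediate.
// Sample values:
//   largeoffset_si16_si16_hi(0x1234C000) == 0x1235     // rounded up
//   largeoffset_si16_si16_lo(0x1234C000) == -0x4000
//   (0x1235 << 16) + (-0x4000)           == 0x1234C000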
goetz@6458 45
goetz@6458 46 // load d = *[a+si31]
goetz@6458 47 // Emits several instructions if the offset is not encodable in one instruction.
goetz@6458 48 void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
goetz@6458 49 void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop);
goetz@6458 50 inline static bool is_ld_largeoffset(address a);
goetz@6458 51 inline static int get_ld_largeoffset_offset(address a);
goetz@6458 52
goetz@6458 53 inline void round_to(Register r, int modulus);
goetz@6458 54
goetz@6458 55 // Load/store with type given by parameter.
goetz@6458 56 void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
goetz@6458 57 void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
goetz@6458 58
goetz@6458 59 // Move register if destination register and source register are different.
goetz@6458 60 inline void mr_if_needed(Register rd, Register rs);
goetz@6458 61
goetz@6458 62 // nop padding
goetz@6458 63 void align(int modulus);
goetz@6458 64
goetz@6458 65 //
goetz@6458 66 // Constants, loading constants, TOC support
goetz@6458 67 //
goetz@6458 68
goetz@6458 69 // Address of the global TOC.
goetz@6458 70 inline static address global_toc();
goetz@6458 71 // Offset of given address to the global TOC.
goetz@6458 72 inline static int offset_to_global_toc(const address addr);
goetz@6458 73
goetz@6458 74 // Address of TOC of the current method.
goetz@6458 75 inline address method_toc();
goetz@6458 76 // Offset of given address to TOC of the current method.
goetz@6458 77 inline int offset_to_method_toc(const address addr);
goetz@6458 78
goetz@6458 79 // Global TOC.
goetz@6458 80 void calculate_address_from_global_toc(Register dst, address addr,
goetz@6458 81 bool hi16 = true, bool lo16 = true,
goetz@6458 82 bool add_relocation = true, bool emit_dummy_addr = false);
goetz@6458 83 inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
goetz@6458 84 calculate_address_from_global_toc(dst, addr, true, false);
goetz@6458 85 };
goetz@6458 86 inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
goetz@6458 87 calculate_address_from_global_toc(dst, addr, false, true);
goetz@6458 88 };
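// Sketch of a typical split use (illustrative only; R11 is just an example
// scratch register): the hi and lo halves may be emitted separately and
// together materialize addr relative to the global TOC in dst.
//   calculate_address_from_global_toc_hi16only(R11, addr);
//   ...   // other instructions may be scheduled in between
//   calculate_address_from_global_toc_lo16only(R11, addr);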
goetz@6458 89
goetz@6458 90 inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
goetz@6458 91 static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
goetz@6458 92 static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
goetz@6458 93
goetz@6458 94 #ifdef _LP64
goetz@6458 95 // Patch narrow oop constant.
goetz@6458 96 inline static bool is_set_narrow_oop(address a, address bound);
goetz@6458 97 static int patch_set_narrow_oop(address a, address bound, narrowOop data);
goetz@6458 98 static narrowOop get_narrow_oop(address a, address bound);
goetz@6458 99 #endif
goetz@6458 100
goetz@6458 101 inline static bool is_load_const_at(address a);
goetz@6458 102
goetz@6458 103 // Emits an oop const to the constant pool, loads the constant, and
goetz@6458 104 // sets a relocation info with address current_pc.
goetz@6458 105 void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
goetz@6458 106 void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
goetz@6458 107 assert(dst == R2_TOC, "base register must be TOC");
goetz@6458 108 load_const_from_method_toc(dst, a, toc);
goetz@6458 109 }
goetz@6458 110
goetz@6458 111 static bool is_load_const_from_method_toc_at(address a);
goetz@6458 112 static int get_offset_of_load_const_from_method_toc_at(address a);
goetz@6458 113
goetz@6458 114 // Get the 64 bit constant from a `load_const' sequence.
goetz@6458 115 static long get_const(address load_const);
goetz@6458 116
goetz@6458 117 // Patch the 64 bit constant of a `load_const' sequence. This is a
goetz@6458 118 // low level procedure. It neither flushes the instruction cache nor
goetz@6458 119 // is it atomic.
goetz@6458 120 static void patch_const(address load_const, long x);
goetz@6458 121
goetz@6458 122 // Metadata in code that we have to keep track of.
goetz@6458 123 AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
goetz@6458 124 AddressLiteral constant_metadata_address(Metadata* obj); // find_index
goetz@6458 125 // Oops used directly in compiled code are stored in the constant pool,
goetz@6458 126 // and loaded from there.
goetz@6458 127 // Allocate new entry for oop in constant pool. Generate relocation.
goetz@6458 128 AddressLiteral allocate_oop_address(jobject obj);
goetz@6458 129 // Find oop obj in constant pool. Return relocation with its index.
goetz@6458 130 AddressLiteral constant_oop_address(jobject obj);
goetz@6458 131
goetz@6458 132 // Find oop in constant pool and emit instructions to load it.
goetz@6458 133 // Uses constant_oop_address.
goetz@6458 134 inline void set_oop_constant(jobject obj, Register d);
goetz@6458 135 // Same as load_address.
goetz@6458 136 inline void set_oop (AddressLiteral obj_addr, Register d);
goetz@6458 137
goetz@6458 138 // Read runtime constant: Issue load if constant not yet established,
goetz@6458 139 // else use real constant.
goetz@6458 140 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
goetz@6458 141 Register tmp,
goetz@6458 142 int offset);
goetz@6458 143
goetz@6458 144 //
goetz@6458 145 // branch, jump
goetz@6458 146 //
goetz@6458 147
goetz@6458 148 inline void pd_patch_instruction(address branch, address target);
goetz@6458 149 NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
goetz@6458 150
goetz@6458 151 // Conditional far branch for destinations encodable in 24+2 bits.
goetz@6458 152 // Same interface as bc, i.e. no inverse boint-field.
goetz@6458 153 enum {
goetz@6458 154 bc_far_optimize_not = 0,
goetz@6458 155 bc_far_optimize_on_relocate = 1
goetz@6458 156 };
goetz@6458 157 // optimize: flag for telling the conditional far branch to optimize
goetz@6458 158 // itself when relocated.
goetz@6458 159 void bc_far(int boint, int biint, Label& dest, int optimize);
goetz@6458 160 // Relocation of conditional far branches.
goetz@6458 161 static bool is_bc_far_at(address instruction_addr);
goetz@6458 162 static address get_dest_of_bc_far_at(address instruction_addr);
goetz@6458 163 static void set_dest_of_bc_far_at(address instruction_addr, address dest);
goetz@6458 164 private:
goetz@6458 165 static bool inline is_bc_far_variant1_at(address instruction_addr);
goetz@6458 166 static bool inline is_bc_far_variant2_at(address instruction_addr);
goetz@6458 167 static bool inline is_bc_far_variant3_at(address instruction_addr);
goetz@6458 168 public:
goetz@6458 169
goetz@6458 170 // Convenience bc_far versions.
goetz@6458 171 inline void blt_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 172 inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 173 inline void beq_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 174 inline void bso_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 175 inline void bge_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 176 inline void ble_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 177 inline void bne_far(ConditionRegister crx, Label& L, int optimize);
goetz@6458 178 inline void bns_far(ConditionRegister crx, Label& L, int optimize);
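// Usage sketch (illustrative; CCR0 and R3 are just example registers): branch
// far to a label that may end up out of range of a plain bc, letting the
// relocation step shrink the branch where possible.
//   Label L_done;
//   cmpdi(CCR0, R3, 0);
//   beq_far(CCR0, L_done, MacroAssembler::bc_far_optimize_on_relocate);
//   ...
//   bind(L_done);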
goetz@6458 179
goetz@6458 180 // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
goetz@6458 181 private:
goetz@6458 182 enum {
goetz@6458 183 bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
goetz@6458 184 bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord,
goetz@6458 185 bxx64_patchable_ret_addr_offset = bxx64_patchable_size
goetz@6458 186 };
goetz@6458 187 void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
goetz@6458 188 static bool is_bxx64_patchable_at( address instruction_addr, bool link);
goetz@6458 189 // Does the instruction use a pc-relative encoding of the destination?
goetz@6458 190 static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
goetz@6458 191 static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link);
goetz@6458 192 // Load destination relative to global toc.
goetz@6458 193 static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link);
goetz@6458 194 static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link);
goetz@6458 195 static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link);
goetz@6458 196 static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
goetz@6458 197
goetz@6458 198 public:
goetz@6458 199 // call
goetz@6458 200 enum {
goetz@6458 201 bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
goetz@6458 202 bl64_patchable_size = bxx64_patchable_size,
goetz@6458 203 bl64_patchable_ret_addr_offset = bxx64_patchable_ret_addr_offset
goetz@6458 204 };
goetz@6458 205 inline void bl64_patchable(address target, relocInfo::relocType rt) {
goetz@6458 206 bxx64_patchable(target, rt, /*link=*/true);
goetz@6458 207 }
goetz@6458 208 inline static bool is_bl64_patchable_at(address instruction_addr) {
goetz@6458 209 return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
goetz@6458 210 }
goetz@6458 211 inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
goetz@6458 212 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
goetz@6458 213 }
goetz@6458 214 inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
goetz@6458 215 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
goetz@6458 216 }
goetz@6458 217 inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
goetz@6458 218 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
goetz@6458 219 }
goetz@6458 220 // jump
goetz@6458 221 enum {
goetz@6458 222 b64_patchable_instruction_count = bxx64_patchable_instruction_count,
goetz@6458 223 b64_patchable_size = bxx64_patchable_size,
goetz@6458 224 };
goetz@6458 225 inline void b64_patchable(address target, relocInfo::relocType rt) {
goetz@6458 226 bxx64_patchable(target, rt, /*link=*/false);
goetz@6458 227 }
goetz@6458 228 inline static bool is_b64_patchable_at(address instruction_addr) {
goetz@6458 229 return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
goetz@6458 230 }
goetz@6458 231 inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
goetz@6458 232 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
goetz@6458 233 }
goetz@6458 234 inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
goetz@6458 235 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
goetz@6458 236 }
goetz@6458 237 inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
goetz@6458 238 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
goetz@6458 239 }
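// Usage sketch (illustrative; target and new_target stand for arbitrary code
// addresses): emit a patchable 64-bit call and later redirect it. As noted
// above, the patching itself is not mt-safe.
//   address call_site = pc();
//   bl64_patchable(target, relocInfo::runtime_call_type);
//   ...
//   // later, at patch time:
//   MacroAssembler::set_dest_of_bl64_patchable_at(call_site, new_target);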
goetz@6458 240
goetz@6458 241 //
goetz@6458 242 // Support for frame handling
goetz@6458 243 //
goetz@6458 244
goetz@6458 245 // some ABI-related functions
goetz@6458 246 void save_nonvolatile_gprs( Register dst_base, int offset);
goetz@6458 247 void restore_nonvolatile_gprs(Register src_base, int offset);
goetz@6458 248 void save_volatile_gprs( Register dst_base, int offset);
goetz@6458 249 void restore_volatile_gprs(Register src_base, int offset);
goetz@6458 250 void save_LR_CR( Register tmp); // tmp contains LR on return.
goetz@6458 251 void restore_LR_CR(Register tmp);
goetz@6458 252
goetz@6458 253 // Get current PC using bl-next-instruction trick.
goetz@6458 254 address get_PC_trash_LR(Register result);
goetz@6458 255
goetz@6458 256 // Resize current frame either relative to the current SP or to an absolute address.
goetz@6458 257 void resize_frame(Register offset, Register tmp);
goetz@6458 258 void resize_frame(int offset, Register tmp);
goetz@6458 259 void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
goetz@6458 260
goetz@6458 261 // Push a frame of size bytes.
goetz@6458 262 void push_frame(Register bytes, Register tmp);
goetz@6458 263
goetz@6458 264 // Push a frame of size `bytes'. No abi space provided.
goetz@6458 265 void push_frame(unsigned int bytes, Register tmp);
goetz@6458 266
goetz@6458 267 // Push a frame of size `bytes' plus abi112 on top.
goetz@6458 268 void push_frame_abi112(unsigned int bytes, Register tmp);
goetz@6458 269
goetz@6458 270 // Set up a new C frame with a spill area for non-volatile GPRs and additional
goetz@6458 271 // space for local variables
goetz@6458 272 void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
goetz@6458 273
goetz@6458 274 // pop current C frame
goetz@6458 275 void pop_frame();
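// Usage sketch (illustrative; using R0 as the temp is only an example): push a
// C frame with ABI header plus two words of local scratch space, do some
// frame-relative work, then tear the frame down again.
//   push_frame_abi112(2 * BytesPerWord, R0);
//   ...
//   pop_frame();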
goetz@6458 276
goetz@6458 277 //
goetz@6458 278 // Calls
goetz@6458 279 //
goetz@6458 280
goetz@6458 281 private:
goetz@6458 282 address _last_calls_return_pc;
goetz@6458 283
goetz@6458 284 // Generic version of a call to a C function via a function descriptor
goetz@6458 285 // with variable support for C calling conventions (TOC, ENV, etc.).
goetz@6458 286 // Updates and returns _last_calls_return_pc.
goetz@6458 287 address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
goetz@6458 288 bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
goetz@6458 289
goetz@6458 290 public:
goetz@6458 291
goetz@6458 292 // Get the pc where the last call will return to. Returns _last_calls_return_pc.
goetz@6458 293 inline address last_calls_return_pc();
goetz@6458 294
goetz@6458 295 // Call a C function via a function descriptor and use full C
goetz@6458 296 // calling conventions. Updates and returns _last_calls_return_pc.
goetz@6458 297 address call_c(Register function_descriptor);
goetz@6458 298 address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
goetz@6458 299 address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
goetz@6458 300 Register toc);
goetz@6458 301
goetz@6458 302 protected:
goetz@6458 303
goetz@6458 304 // It is imperative that all calls into the VM are handled via the
goetz@6458 305 // call_VM macros. They make sure that the stack linkage is setup
goetz@6458 306 // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
goetz@6458 307 // while call_VM_leaf's correspond to LEAF entry points.
goetz@6458 308 //
goetz@6458 309 // This is the base routine called by the different versions of
goetz@6458 310 // call_VM. The interpreter may customize this version by overriding
goetz@6458 311 // it for its purposes (e.g., to save/restore additional registers
goetz@6458 312 // when doing a VM call).
goetz@6458 313 //
goetz@6458 314 // If no last_java_sp is specified (noreg) then SP will be used instead.
goetz@6458 315 virtual void call_VM_base(
goetz@6458 316 // where an oop-result ends up if any; use noreg otherwise
goetz@6458 317 Register oop_result,
goetz@6458 318 // to set up last_Java_frame in stubs; use noreg otherwise
goetz@6458 319 Register last_java_sp,
goetz@6458 320 // the entry point
goetz@6458 321 address entry_point,
goetz@6458 322 // flag which indicates if exception should be checked
goetz@6458 323 bool check_exception=true
goetz@6458 324 );
goetz@6458 325
goetz@6458 326 // Support for VM calls. This is the base routine called by the
goetz@6458 327 // different versions of call_VM_leaf. The interpreter may customize
goetz@6458 328 // this version by overriding it for its purposes (e.g., to
goetz@6458 329 // save/restore additional registers when doing a VM call).
goetz@6458 330 void call_VM_leaf_base(address entry_point);
goetz@6458 331
goetz@6458 332 public:
goetz@6458 333 // Call into the VM.
goetz@6458 334 // Passes the thread pointer (in R3_ARG1) as a prepended argument.
goetz@6458 335 // Makes sure oop return values are visible to the GC.
goetz@6458 336 void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
goetz@6458 337 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
goetz@6458 338 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
goetz@6458 339 void call_VM_leaf(address entry_point);
goetz@6458 340 void call_VM_leaf(address entry_point, Register arg_1);
goetz@6458 341 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
goetz@6458 342 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
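// Usage sketch (illustrative; Roop_result, Rarg1 and entry_point are
// placeholders): the thread pointer is prepended automatically, so only the
// Java-level arguments are passed here.
//   call_VM(Roop_result, entry_point, Rarg1);
//   // Roop_result now holds the GC-visible oop result, if any.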
goetz@6458 343
goetz@6458 344 // Call a stub function via a function descriptor, but don't save
goetz@6458 345 // TOC before call, don't setup TOC and ENV for call, and don't
goetz@6458 346 // restore TOC after call. Updates and returns _last_calls_return_pc.
goetz@6458 347 inline address call_stub(Register function_entry);
goetz@6458 348 inline void call_stub_and_return_to(Register function_entry, Register return_pc);
goetz@6458 349
goetz@6458 350 //
goetz@6458 351 // Java utilities
goetz@6458 352 //
goetz@6458 353
goetz@6458 354 // Read from the polling page; its address is already in a register.
goetz@6458 355 inline void load_from_polling_page(Register polling_page_address, int offset = 0);
goetz@6458 356 // Check whether instruction is a read access to the polling page
goetz@6458 357 // which was emitted by load_from_polling_page(..).
goetz@6458 358 static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
goetz@6458 359 address* polling_address_ptr = NULL);
goetz@6458 360
goetz@6458 361 // Check whether instruction is a write access to the memory
goetz@6458 362 // serialization page realized by one of the instructions stw, stwu,
goetz@6458 363 // stwx, or stwux.
goetz@6458 364 static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);
goetz@6458 365
goetz@6458 366 // Support for NULL-checks
goetz@6458 367 //
goetz@6458 368 // Generates code that causes a NULL OS exception if the content of reg is NULL.
goetz@6458 369 // If the accessed location is M[reg + offset] and the offset is known, provide the
goetz@6458 370 // offset. No explicit code generation is needed if the offset is within a certain
goetz@6458 371 // range (0 <= offset <= page_size).
goetz@6458 372
goetz@6458 373 // Stack overflow checking
goetz@6458 374 void bang_stack_with_offset(int offset);
goetz@6458 375
goetz@6458 376 // If instruction is a stack bang of the form ld, stdu, or
goetz@6458 377 // stdux, return the banged address. Otherwise, return 0.
goetz@6458 378 static address get_stack_bang_address(int instruction, void* ucontext);
goetz@6458 379
goetz@6458 380 // Atomics
goetz@6458 381 // CmpxchgX sets condition register to cmpX(current, compare).
goetz@6458 382 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6458 383 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6458 384 static inline bool cmpxchgx_hint_acquire_lock() { return true; }
goetz@6458 385 // The stxcx will probably not be succeeded by a releasing store.
goetz@6458 386 static inline bool cmpxchgx_hint_release_lock() { return false; }
goetz@6458 387 static inline bool cmpxchgx_hint_atomic_update() { return false; }
goetz@6458 388
goetz@6458 389 // Cmpxchg semantics
goetz@6458 390 enum {
goetz@6458 391 MemBarNone = 0,
goetz@6458 392 MemBarRel = 1,
goetz@6458 393 MemBarAcq = 2,
goetz@6458 394 MemBarFenceAfter = 4 // use powers of 2
goetz@6458 395 };
goetz@6458 396 void cmpxchgw(ConditionRegister flag,
goetz@6458 397 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
goetz@6458 398 int semantics, bool cmpxchgx_hint = false,
goetz@6458 399 Register int_flag_success = noreg, bool contention_hint = false);
goetz@6458 400 void cmpxchgd(ConditionRegister flag,
goetz@6458 401 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
goetz@6458 402 int semantics, bool cmpxchgx_hint = false,
goetz@6458 403 Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
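// Usage sketch (illustrative; the R* names and L_cas_failed are placeholders):
// CAS a 64-bit field with acquire semantics, as used for lock-like protocols.
//   cmpxchgd(CCR0, Rcurrent, Rcompare, Rexchange, Raddr_base,
//            MemBarAcq, cmpxchgx_hint_acquire_lock(),
//            noreg, &L_cas_failed, /*contention_hint=*/true);
//   // CCR0 is set to eq if the swap happened, ne otherwise.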
goetz@6458 404
goetz@6458 405 // interface method calling
goetz@6458 406 void lookup_interface_method(Register recv_klass,
goetz@6458 407 Register intf_klass,
goetz@6458 408 RegisterOrConstant itable_index,
goetz@6458 409 Register method_result,
goetz@6458 410 Register temp_reg, Register temp2_reg,
goetz@6458 411 Label& no_such_interface);
goetz@6458 412
goetz@6458 413 // virtual method calling
goetz@6458 414 void lookup_virtual_method(Register recv_klass,
goetz@6458 415 RegisterOrConstant vtable_index,
goetz@6458 416 Register method_result);
goetz@6458 417
goetz@6458 418 // Test sub_klass against super_klass, with fast and slow paths.
goetz@6458 419
goetz@6458 420 // The fast path produces a tri-state answer: yes / no / maybe-slow.
goetz@6458 421 // One of the three labels can be NULL, meaning take the fall-through.
goetz@6458 422 // If super_check_offset is -1, the value is loaded up from super_klass.
goetz@6458 423 // No registers are killed, except temp1_reg and temp2_reg.
goetz@6458 424 // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
goetz@6458 425 void check_klass_subtype_fast_path(Register sub_klass,
goetz@6458 426 Register super_klass,
goetz@6458 427 Register temp1_reg,
goetz@6458 428 Register temp2_reg,
goetz@6458 429 Label& L_success,
goetz@6458 430 Label& L_failure);
goetz@6458 431
goetz@6458 432 // The rest of the type check; must be wired to a corresponding fast path.
goetz@6458 433 // It does not repeat the fast path logic, so don't use it standalone.
goetz@6458 434 // A temp register can be noreg, if no temps are available.
goetz@6458 435 // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
goetz@6458 436 // Updates the sub's secondary super cache as necessary.
goetz@6458 437 void check_klass_subtype_slow_path(Register sub_klass,
goetz@6458 438 Register super_klass,
goetz@6458 439 Register temp1_reg,
goetz@6458 440 Register temp2_reg,
goetz@6458 441 Label* L_success = NULL,
goetz@6458 442 Register result_reg = noreg);
goetz@6458 443
goetz@6458 444 // Simplified, combined version, good for typical uses.
goetz@6458 445 // Falls through on failure.
goetz@6458 446 void check_klass_subtype(Register sub_klass,
goetz@6458 447 Register super_klass,
goetz@6458 448 Register temp1_reg,
goetz@6458 449 Register temp2_reg,
goetz@6458 450 Label& L_success);
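// Usage sketch (illustrative; the R* names are placeholders): the combined
// check branches to L_ok on success and simply falls through on failure.
//   Label L_ok;
//   check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, L_ok);
//   ...            // failure handling falls through to here (e.g. jump to a slow path)
//   bind(L_ok);    // subtype check succeeded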
goetz@6458 451
goetz@6458 452 // Method handle support (JSR 292).
goetz@6458 453 void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
goetz@6458 454
goetz@6458 455 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
goetz@6458 456
goetz@6458 457 // Biased locking support
goetz@6458 458 // Upon entry, obj_reg must contain the target object, and mark_reg
goetz@6458 459 // must contain the target object's header.
goetz@6458 460 // Destroys mark_reg if an attempt is made to bias an anonymously
goetz@6458 461 // biased lock. In this case a failure will go either to the slow
goetz@6458 462 // case or fall through with the notEqual condition code set with
goetz@6458 463 // the expectation that the slow case in the runtime will be called.
goetz@6458 464 // In the fall-through case where the CAS-based lock is done,
goetz@6458 465 // mark_reg is not destroyed.
goetz@6458 466 void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
goetz@6458 467 Register temp2_reg, Label& done, Label* slow_case = NULL);
goetz@6458 468 // Upon entry, the base register of mark_addr must contain the oop.
goetz@6458 469 // Destroys temp_reg.
goetz@6458 470 // If allow_delay_slot_filling is set to true, the next instruction
goetz@6458 471 // emitted after this one will go in an annulled delay slot if the
goetz@6458 472 // biased locking exit case failed.
goetz@6458 473 void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
goetz@6458 474
goetz@6458 475 void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
goetz@6458 476 void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
goetz@6458 477
goetz@6458 478 // Support for serializing memory accesses between threads
goetz@6458 479 void serialize_memory(Register thread, Register tmp1, Register tmp2);
goetz@6458 480
goetz@6458 481 // GC barrier support.
goetz@6458 482 void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
goetz@6458 483 void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
goetz@6458 484
goetz@6458 485 #ifndef SERIALGC
goetz@6458 486 // General G1 pre-barrier generator.
goetz@6458 487 void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
goetz@6458 488 Register Rtmp1, Register Rtmp2, bool needs_frame = false);
goetz@6458 489 // General G1 post-barrier generator
goetz@6458 490 void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
goetz@6458 491 Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
goetz@6458 492 #endif // SERIALGC
goetz@6458 493
goetz@6458 494 // Support for managing the JavaThread pointer (i.e., the reference to
goetz@6458 495 // thread-local information).
goetz@6458 496
goetz@6458 497 // Support for last Java frame (but use call_VM instead where possible):
goetz@6458 498 // access R16_thread->last_Java_sp.
goetz@6458 499 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
goetz@6458 500 void reset_last_Java_frame(void);
goetz@6458 501 void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
goetz@6458 502
goetz@6458 503 // Read vm result from thread: oop_result = R16_thread->result;
goetz@6458 504 void get_vm_result (Register oop_result);
goetz@6458 505 void get_vm_result_2(Register metadata_result);
goetz@6458 506
goetz@6458 507 static bool needs_explicit_null_check(intptr_t offset);
goetz@6458 508
goetz@6458 509 // Trap-instruction-based checks.
goetz@6458 510 // Range checks can be distinguished from zero checks as they check 32 bits
goetz@6458 511 // (tw, twi), while zero checks check all 64 bits (td, tdi).
goetz@6458 512 inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
goetz@6458 513 static bool is_trap_null_check(int x) {
goetz@6458 514 return is_tdi(x, traptoEqual, -1/*any reg*/, 0) ||
goetz@6458 515 is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
goetz@6458 516 }
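// Usage sketch (illustrative; Rreceiver is a placeholder): a SIGTRAP-based
// null check. The signal handler recognizes the tdi via is_trap_null_check()
// and turns it into the implicit-null-check exception path.
//   trap_null_check(Rreceiver);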
goetz@6458 517
goetz@6458 518 inline void trap_zombie_not_entrant();
goetz@6458 519 static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
goetz@6458 520
goetz@6458 521 inline void trap_should_not_reach_here();
goetz@6458 522 static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
goetz@6458 523
goetz@6458 524 inline void trap_ic_miss_check(Register a, Register b);
goetz@6458 525 static bool is_trap_ic_miss_check(int x) {
goetz@6458 526 return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
goetz@6458 527 }
goetz@6458 528
goetz@6458 529 // Implicit or explicit null check, jumps to static address exception_entry.
goetz@6458 530 inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
goetz@6458 531
goetz@6458 532 // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
goetz@6458 533 inline void ld_with_trap_null_check(Register d, int si16, Register s1);
goetz@6458 534 // Variant for heap OOPs including decompression of compressed OOPs.
goetz@6458 535 inline void load_heap_oop_with_trap_null_check(Register d, RegisterOrConstant offs, Register s1);
goetz@6458 536
goetz@6458 537 // Load heap oop and decompress. Loaded oop may not be null.
goetz@6458 538 inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
goetz@6458 539
goetz@6458 540 // Null allowed.
goetz@6458 541 inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
goetz@6458 542
goetz@6458 543 // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
goetz@6458 544 inline void encode_heap_oop_not_null(Register d);
goetz@6458 545 inline void decode_heap_oop_not_null(Register d);
goetz@6458 546
goetz@6458 547 // Null allowed.
goetz@6458 548 inline void decode_heap_oop(Register d);
goetz@6458 549
goetz@6458 550 // Load/Store klass oop from klass field. Compress.
goetz@6458 551 void load_klass(Register dst, Register src);
goetz@6458 552 void load_klass_with_trap_null_check(Register dst, Register src);
goetz@6458 553 void store_klass(Register dst_oop, Register klass, Register tmp = R0);
goetz@6477 554 static int instr_size_for_decode_klass_not_null();
goetz@6458 555 void decode_klass_not_null(Register dst, Register src = noreg);
goetz@6458 556 void encode_klass_not_null(Register dst, Register src = noreg);
goetz@6458 557
goetz@6458 558 // Load common heap base into register.
goetz@6458 559 void reinit_heapbase(Register d, Register tmp = noreg);
goetz@6458 560
goetz@6458 561 // SIGTRAP-based range checks for arrays.
goetz@6458 562 inline void trap_range_check_l(Register a, Register b);
goetz@6458 563 inline void trap_range_check_l(Register a, int si16);
goetz@6458 564 static bool is_trap_range_check_l(int x) {
goetz@6458 565 return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
goetz@6458 566 is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) );
goetz@6458 567 }
goetz@6458 568 inline void trap_range_check_le(Register a, int si16);
goetz@6458 569 static bool is_trap_range_check_le(int x) {
goetz@6458 570 return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
goetz@6458 571 }
goetz@6458 572 inline void trap_range_check_g(Register a, int si16);
goetz@6458 573 static bool is_trap_range_check_g(int x) {
goetz@6458 574 return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
goetz@6458 575 }
goetz@6458 576 inline void trap_range_check_ge(Register a, Register b);
goetz@6458 577 inline void trap_range_check_ge(Register a, int si16);
goetz@6458 578 static bool is_trap_range_check_ge(int x) {
goetz@6458 579 return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
goetz@6458 580 is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) );
goetz@6458 581 }
goetz@6458 582 static bool is_trap_range_check(int x) {
goetz@6458 583 return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
goetz@6458 584 is_trap_range_check_g(x) || is_trap_range_check_ge(x);
goetz@6458 585 }
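// Usage sketch (illustrative; Rindex and Rlength are placeholders): an array
// bounds check that traps when index >= length (unsigned compare), matched
// again by is_trap_range_check() in the signal handler.
//   trap_range_check_ge(Rindex, Rlength);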
goetz@6458 586
goetz@6458 587 // Needle of length 1.
goetz@6458 588 void string_indexof_1(Register result, Register haystack, Register haycnt,
goetz@6458 589 Register needle, jchar needleChar,
goetz@6458 590 Register tmp1, Register tmp2);
goetz@6458 591 // General indexof, optionally with a constant needle length.
goetz@6458 592 void string_indexof(Register result, Register haystack, Register haycnt,
goetz@6458 593 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
goetz@6458 594 Register tmp1, Register tmp2, Register tmp3, Register tmp4);
goetz@6458 595 void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
goetz@6458 596 Register result_reg, Register tmp_reg);
goetz@6458 597 void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
goetz@6458 598 Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
goetz@6458 599 Register tmp5_reg);
goetz@6458 600 void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
goetz@6458 601 Register tmp1_reg, Register tmp2_reg);
goetz@6458 602
goetz@6458 603 //
goetz@6458 604 // Debugging
goetz@6458 605 //
goetz@6458 606
goetz@6458 607 // assert on cr0
goetz@6458 608 void asm_assert(bool check_equal, const char* msg, int id);
goetz@6458 609 void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
goetz@6458 610 void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
goetz@6458 611
goetz@6458 612 private:
goetz@6458 613 void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
goetz@6458 614 const char* msg, int id);
goetz@6458 615
goetz@6458 616 public:
goetz@6458 617
goetz@6458 618 void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
goetz@6458 619 asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id);
goetz@6458 620 }
goetz@6458 621 void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
goetz@6458 622 asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
goetz@6458 623 }
goetz@6458 624
goetz@6458 625 // Verify R16_thread contents.
goetz@6458 626 void verify_thread();
goetz@6458 627
goetz@6458 628 // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
goetz@6458 629 void verify_oop(Register reg, const char* s = "broken oop");
goetz@6458 630
goetz@6458 631 // TODO: verify method and klass metadata (compare against vptr?)
goetz@6458 632 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
goetz@6458 633 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
goetz@6458 634
goetz@6458 635 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
goetz@6458 636 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
goetz@6458 637
goetz@6458 638 private:
goetz@6458 639
goetz@6458 640 enum {
goetz@6458 641 stop_stop = 0,
goetz@6458 642 stop_untested = 1,
goetz@6458 643 stop_unimplemented = 2,
goetz@6458 644 stop_shouldnotreachhere = 3,
goetz@6458 645 stop_end = 4
goetz@6458 646 };
goetz@6458 647 void stop(int type, const char* msg, int id);
goetz@6458 648
goetz@6458 649 public:
goetz@6458 650 // Prints msg, dumps registers and stops execution.
goetz@6458 651 void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
goetz@6458 652 void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
goetz@6458 653 void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
goetz@6458 654 void should_not_reach_here() { stop(stop_shouldnotreachhere, "", -1); }
goetz@6458 655
goetz@6458 656 void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
goetz@6458 657 };
goetz@6458 658
goetz@6458 659 #endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
